Example No. 1
    def test_calculate_bucket_start(self):
        stats = MetricsBucketAggregator('myhost', interval=10)
        assert stats.calculate_bucket_start(13284283) == 13284280
        assert stats.calculate_bucket_start(13284280) == 13284280
        stats = MetricsBucketAggregator('myhost', interval=5)
        assert stats.calculate_bucket_start(13284287) == 13284285
        assert stats.calculate_bucket_start(13284280) == 13284280
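The expected values above are consistent with flooring the timestamp to a multiple of the interval. A minimal standalone sketch of that behavior (inferred from the assertions; not the aggregator's actual code):

    def calculate_bucket_start(timestamp, interval):
        # Floor the timestamp to the nearest lower multiple of the interval.
        return timestamp - (timestamp % interval)

    assert calculate_bucket_start(13284283, 10) == 13284280
    assert calculate_bucket_start(13284287, 5) == 13284285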
Example No. 2
    def test_sets_flush_during_bucket(self):
        ag_interval = self.interval
        stats = MetricsBucketAggregator('myhost', interval=ag_interval)
        self.wait_for_bucket_boundary(ag_interval)

        stats.submit_packets('my.set:10|s')
        stats.submit_packets('my.set:20|s')
        stats.submit_packets('my.set:20|s')
        stats.submit_packets('my.set:30|s')
        stats.submit_packets('my.set:30|s')
        stats.submit_packets('my.set:30|s')
        self.sleep_for_interval_length(ag_interval)
        stats.submit_packets('my.set:40|s')

        # Assert that it's treated normally.
        metrics = stats.flush()

        assert len(metrics) == 1
        m = metrics[0]
        assert m['metric'] == 'my.set'
        assert m['points'][0][1] == 3

        self.sleep_for_interval_length(ag_interval)
        metrics = stats.flush()
        m = metrics[0]
        assert m['metric'] == 'my.set'
        assert m['points'][0][1] == 1

        # Assert there are no more sets
        assert not stats.flush()
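The value 3 flushed above is the number of distinct sample values in the first bucket (10, 20, 30); the lone 40 submitted after the interval lands in the next bucket and flushes as 1. A minimal illustration of set-metric semantics (hypothetical, separate from the aggregator):

    # Sets count distinct values per bucket; duplicates are ignored.
    bucket_one = {'10', '20', '20', '30', '30', '30'}  # a Python set deduplicates
    bucket_two = {'40'}
    assert len(bucket_one) == 3
    assert len(bucket_two) == 1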
Example No. 3
    def test_gauge_flush_during_bucket(self):
        # Tests returning data when flush is called in the middle of a time bucket that has data
        ag_interval = self.interval
        stats = MetricsBucketAggregator('myhost', interval=ag_interval)
        self.wait_for_bucket_boundary(ag_interval)

        # Track some gauges.
        stats.submit_packets('my.first.gauge:1|g')
        stats.submit_packets('my.first.gauge:5|g')
        stats.submit_packets('my.second.gauge:1.5|g')
        self.sleep_for_interval_length(ag_interval)
        stats.submit_packets('my.second.gauge:9.5|g')

        # Ensure that gauges roll up correctly.
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 2

        first, second = metrics

        assert first['metric'] == 'my.first.gauge'
        assert first['points'][0][1] == 5
        assert first['host'] == 'myhost'

        assert second['metric'] == 'my.second.gauge'
        assert second['points'][0][1] == 1.5

        self.sleep_for_interval_length(ag_interval)
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 1

        second = metrics[0]
        assert second['metric'] == 'my.second.gauge'
        assert second['points'][0][1] == 9.5
Example No. 4
    def test_gauge_buckets(self):
        # Tests returning data from 2 time buckets
        ag_interval = self.interval
        stats = MetricsBucketAggregator('myhost', interval=ag_interval)
        self.wait_for_bucket_boundary(ag_interval)

        # Track some gauges.
        stats.submit_packets('my.first.gauge:1|g')
        stats.submit_packets('my.first.gauge:5|g')
        stats.submit_packets('my.second.gauge:1.5|g')
        self.sleep_for_interval_length(ag_interval)
        stats.submit_packets('my.second.gauge:9.5|g')

        # Ensure that gauges roll up correctly.
        self.sleep_for_interval_length(ag_interval)
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 3

        first, second, second_b = metrics

        assert first['metric'] == 'my.first.gauge'
        assert first['points'][0][1] == 5
        assert first['host'] == 'myhost'

        assert second_b['metric'] == 'my.second.gauge'
        assert second_b['points'][0][1] == 9.5

        assert second['metric'] == 'my.second.gauge'
        assert second['points'][0][1] == 1.5

        # check that they come back empty
        self.sleep_for_interval_length(ag_interval)
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 0
Example No. 5
def init(config_path=None, use_watchdog=False, use_forwarder=False, args=None):
    """Configure the server and the reporting thread.
    """
    c = get_config(parse_args=False, cfg_path=config_path)

    if not c['use_dogstatsd'] and \
        (args and args[0] in ['start', 'restart'] or not args):
        log.info("Dogstatsd is disabled. Exiting")
        # We're exiting purposefully, so exit with zero (supervisor's expected
        # code). HACK: Sleep a little bit so supervisor thinks we've started cleanly
        # and thus can exit cleanly.
        sleep(4)
        sys.exit(0)

    log.debug("Configurating     dogstatsd")

    port = c['dogstatsd_port']
    interval = DOGSTATSD_FLUSH_INTERVAL
    api_key = c['api_key']
    aggregator_interval = DOGSTATSD_AGGREGATOR_BUCKET_SIZE
    non_local_traffic = c['non_local_traffic']
    forward_to_host = c.get('statsd_forward_host')
    forward_to_port = c.get('statsd_forward_port')
    event_chunk_size = c.get('event_chunk_size')
    recent_point_threshold = c.get('recent_point_threshold', None)

    target = c['dd_url']
    if use_forwarder:
        target = c['dogstatsd_target']

    hostname = get_hostname(c)

    # Create the aggregator (which is the point of communication between the
    # server and reporting threads).
    assert 0 < interval

    aggregator = MetricsBucketAggregator(
        hostname,
        aggregator_interval,
        recent_point_threshold=recent_point_threshold,
        formatter=get_formatter(c))

    # Start the reporting thread.
    reporter = Reporter(interval, aggregator, target, api_key, use_watchdog,
                        event_chunk_size)

    # Start the server on an IPv4 stack
    # Default to loopback
    server_host = c['bind_host']
    # If specified, bind to all addresses
    if non_local_traffic:
        server_host = ''

    server = Server(aggregator,
                    server_host,
                    port,
                    forward_to_host=forward_to_host,
                    forward_to_port=forward_to_port)

    return reporter, server, c
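A hedged usage sketch of this init(): the calls reporter.start(), server.start() and reporter.stop() mirror how the dd-agent dogstatsd daemon drives these objects, and are assumptions here, not part of the example above.

    # Hypothetical wiring of the returned objects:
    reporter, server, config = init(config_path='/etc/dd-agent/datadog.conf')
    reporter.start()    # reporting thread: flushes the aggregator periodically
    try:
        server.start()  # blocks, feeding UDP packets into the aggregator
    finally:
        reporter.stop()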
Example No. 6
    def test_event_tags(self):
        stats = MetricsBucketAggregator('myhost', interval=self.interval)
        stats.submit_packets('_e{6,4}:title1|text')
        stats.submit_packets('_e{6,4}:title2|text|#t1')
        stats.submit_packets('_e{6,4}:title3|text|#t1,t2:v2,t3,t4')
        stats.submit_packets('_e{6,4}:title4|text|k:key|p:normal|#t1,t2')

        events = self.sort_events(stats.flush_events())

        assert len(events) == 4
        first, second, third, fourth = events

        assert 'tags' not in first, \
            "event['tags'] shouldn't be defined when no tags are given in the packet"
        assert first['msg_title'] == 'title1'
        assert first['msg_text'] == 'text'

        assert second['msg_title'] == 'title2'
        assert second['msg_text'] == 'text'
        assert second['tags'] == sorted(['t1'])

        assert third['msg_title'] == 'title3'
        assert third['msg_text'] == 'text'
        assert third['tags'] == sorted(['t1', 't2:v2', 't3', 't4'])

        assert fourth['msg_title'] == 'title4'
        assert fourth['msg_text'] == 'text'
        assert fourth['aggregation_key'] == 'key'
        assert fourth['priority'] == 'normal'
        assert fourth['tags'] == sorted(['t1', 't2'])
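The packets above follow the dogstatsd event wire format: _e{title_length,text_length}:title|text, followed by optional fields such as |k: (aggregation key), |p: (priority) and |# (comma-separated tags). A hypothetical builder for such packets:

    def build_event_packet(title, text, agg_key=None, priority=None, tags=None):
        # Lengths are declared up front so the parser can slice the payload.
        packet = '_e{%d,%d}:%s|%s' % (len(title), len(text), title, text)
        if agg_key:
            packet += '|k:%s' % agg_key
        if priority:
            packet += '|p:%s' % priority
        if tags:
            packet += '|#%s' % ','.join(tags)
        return packet

    assert (build_event_packet('title4', 'text', 'key', 'normal', ['t1', 't2'])
            == '_e{6,4}:title4|text|k:key|p:normal|#t1,t2')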
Example No. 7
    def test_dogstatsd_aggregation_perf(self):
        ma = MetricsBucketAggregator('my.host')

        for _ in xrange(self.FLUSH_COUNT):
            for i in xrange(self.LOOPS_PER_FLUSH):
                for j in xrange(self.METRIC_COUNT):

                    # metrics
                    ma.submit_packets('counter.%s:%s|c' % (j, i))
                    ma.submit_packets('gauge.%s:%s|g' % (j, i))
                    ma.submit_packets('histogram.%s:%s|h' % (j, i))
                    ma.submit_packets('set.%s:%s|s' % (j, 1.0))

                    # tagged metrics
                    ma.submit_packets('counter.%s:%s|c|#tag1,tag2' % (j, i))
                    ma.submit_packets('gauge.%s:%s|g|#tag1,tag2' % (j, i))
                    ma.submit_packets('histogram.%s:%s|h|#tag1,tag2' % (j, i))
                    ma.submit_packets('set.%s:%s|s|#tag1,tag2' % (j, i))

                    # sampled metrics
                    ma.submit_packets('counter.%s:%s|c|@0.5' % (j, i))
                    ma.submit_packets('gauge.%s:%s|g|@0.5' % (j, i))
                    ma.submit_packets('histogram.%s:%s|h|@0.5' % (j, i))
                    ma.submit_packets('set.%s:%s|s|@0.5' % (j, i))

            ma.flush()
Example No. 8
def init5(agent_config=None, use_watchdog=False, use_forwarder=False, args=None):
    """Configure the server and the reporting thread.
    """
    if (not agent_config['use_dogstatsd'] and
            (args and args[0] in ['start', 'restart'] or not args)):
        log.info("StsStatsd is disabled. Exiting")
        # We're exiting purposefully, so exit with zero (supervisor's expected
        # code). HACK: Sleep a little bit so supervisor thinks we've started cleanly
        # and thus can exit cleanly.
        sleep(4)
        sys.exit(0)

    port = agent_config['dogstatsd_port']
    interval = DOGSTATSD_FLUSH_INTERVAL
    api_key = agent_config['api_key']
    aggregator_interval = DOGSTATSD_AGGREGATOR_BUCKET_SIZE
    non_local_traffic = agent_config['non_local_traffic']
    forward_to_host = agent_config.get('statsd_forward_host')
    forward_to_port = agent_config.get('statsd_forward_port')
    event_chunk_size = agent_config.get('event_chunk_size')
    recent_point_threshold = agent_config.get('recent_point_threshold', None)
    so_rcvbuf = agent_config.get('statsd_so_rcvbuf', None)
    server_host = agent_config['bind_host']

    target = agent_config['dd_url']
    if use_forwarder:
        target = agent_config['dogstatsd_target']

    hostname = get_hostname(agent_config)
    log.debug("Using hostname \"%s\"", hostname)

    # Create the aggregator (which is the point of communication between the
    # server and reporting threads).
    assert 0 < interval

    aggregator = MetricsBucketAggregator(
        hostname,
        aggregator_interval,
        recent_point_threshold=recent_point_threshold,
        formatter=get_formatter(agent_config),
        histogram_aggregates=agent_config.get('histogram_aggregates'),
        histogram_percentiles=agent_config.get('histogram_percentiles'),
        utf8_decoding=agent_config['utf8_decoding']
    )

    # Start the reporting thread.
    reporter = Reporter(interval, aggregator, target, api_key, use_watchdog, event_chunk_size)

    # NOTICE: when `non_local_traffic` is passed we need to bind to any interface on the box. The forwarder uses
    # Tornado which takes care of sockets creation (more than one socket can be used at once depending on the
    # network settings), so it's enough to just pass an empty string '' to the library.
    # In Dogstatsd we use a single, fullstack socket, so passing '' as the address doesn't work and we default to
    # '0.0.0.0'. If someone needs to bind Dogstatsd to the IPv6 '::', they need to turn off `non_local_traffic` and
    # use the '::' meta address as `bind_host`.
    if non_local_traffic:
        server_host = '0.0.0.0'

    server = Server(aggregator, server_host, port, forward_to_host=forward_to_host, forward_to_port=forward_to_port, so_rcvbuf=so_rcvbuf)

    return reporter, server
Example No. 9
    def test_tags(self):
        stats = MetricsBucketAggregator('myhost', interval=self.interval)
        stats.submit_packets('gauge:1|c')
        stats.submit_packets('gauge:2|c|@1')
        stats.submit_packets('gauge:4|c|#tag1,tag2')
        stats.submit_packets(
            'gauge:8|c|#tag2,tag1')  # Should be the same as above
        stats.submit_packets('gauge:16|c|#tag3,tag4')

        self.sleep_for_interval_length()
        metrics = self.sort_metrics(stats.flush())

        assert len(metrics) == 3
        first, second, third = metrics

        assert first['metric'] == 'gauge'
        assert first['tags'] is None
        assert first['points'][0][1] == 3
        assert first['host'] == 'myhost'

        assert second['metric'] == 'gauge'
        assert second['tags'] == ('tag1', 'tag2')
        assert second['points'][0][1] == 12
        assert second['host'] == 'myhost'

        assert third['metric'] == 'gauge'
        assert third['tags'] == ('tag3', 'tag4')
        assert third['points'][0][1] == 16
        assert third['host'] == 'myhost'
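The packets 'gauge:4|c|#tag1,tag2' and 'gauge:8|c|#tag2,tag1' aggregate together above (4 + 8 = 12) because the tag set, not its order, identifies a metric context. A minimal sketch of that keying, assuming sorted-tuple contexts (an assumption, not the aggregator's actual code):

    def context_key(metric, tags):
        # Order-insensitive: the same tags in any order map to one slot.
        return (metric, tuple(sorted(tags)) if tags else None)

    assert context_key('gauge', ['tag1', 'tag2']) == context_key('gauge', ['tag2', 'tag1'])
    assert context_key('gauge', ['tag1', 'tag2']) != context_key('gauge', ['tag3', 'tag4'])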
Example No. 10
    def test_sets_buckets(self):
        stats = MetricsBucketAggregator('myhost', interval=self.interval)
        stats.submit_packets('my.set:10|s')
        stats.submit_packets('my.set:20|s')
        stats.submit_packets('my.set:20|s')
        stats.submit_packets('my.set:30|s')
        stats.submit_packets('my.set:30|s')
        stats.submit_packets('my.set:30|s')
        self.sleep_for_interval_length()
        stats.submit_packets('my.set:40|s')

        # Assert that it's treated normally.
        self.sleep_for_interval_length()
        metrics = stats.flush()

        assert len(metrics) == 2
        m, m2 = metrics
        assert m['metric'] == 'my.set'
        assert m['points'][0][1] == 3

        assert m2['metric'] == 'my.set'
        assert m2['points'][0][1] == 1

        # Assert there are no more sets
        assert not stats.flush()
Example No. 11
    def test_histogram_flush_during_bucket(self):
        ag_interval = 1
        # The min is not enabled by default
        stats = MetricsBucketAggregator(
            'myhost',
            interval=ag_interval,
            histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES + ['min'])

        # Sample all numbers in 0-99 many times. This
        # means our percentiles should be relatively close to the sampled values.
        self.wait_for_bucket_boundary(ag_interval)
        percentiles = list(range(100))
        random.shuffle(percentiles)  # in place
        for i in percentiles:
            for j in range(20):
                for type_ in ['h', 'ms']:
                    m = 'my.p:%s|%s' % (i, type_)
                    stats.submit_packets(m)

        time.sleep(
            self.BUCKET_BOUNDARY_TOLERANCE
        )  # Make sure that we'll wait for the _next_ bucket boundary
        self.wait_for_bucket_boundary(ag_interval)
        percentiles = list(range(50))
        random.shuffle(percentiles)  # in place
        for i in percentiles:
            for j in range(20):
                for type_ in ['h', 'ms']:
                    m = 'my.p:%s|%s' % (i, type_)
                    stats.submit_packets(m)

        metrics = self.sort_metrics(stats.flush())

        assert len(metrics) == 6
        p95, pavg, pcount, pmax, pmed, pmin = self.sort_metrics(metrics)
        assert p95['metric'] == 'my.p.95percentile'
        self.assert_almost_equal(p95['points'][0][1], 95, 10)
        self.assert_almost_equal(pmax['points'][0][1], 99, 1)
        self.assert_almost_equal(pmed['points'][0][1], 50, 2)
        self.assert_almost_equal(pavg['points'][0][1], 50, 2)
        self.assert_almost_equal(pmin['points'][0][1], 1, 1)
        assert pcount['points'][0][1] == 4000  # 100 * 20 * 2
        assert p95['host'] == 'myhost'

        self.sleep_for_interval_length()
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 6
        p95_b, pavg_b, pcount_b, pmax_b, pmed_b, pmin_b = self.sort_metrics(
            metrics)
        assert p95_b['metric'] == 'my.p.95percentile'
        self.assert_almost_equal(p95_b['points'][0][1], 47, 10)
        self.assert_almost_equal(pmax_b['points'][0][1], 49, 1)
        self.assert_almost_equal(pmed_b['points'][0][1], 25, 2)
        self.assert_almost_equal(pavg_b['points'][0][1], 25, 2)
        self.assert_almost_equal(pmin_b['points'][0][1], 1, 1)
        assert pcount_b['points'][0][1] == 2000  # 50 * 20 * 2

        # Ensure that histograms are reset.
        metrics = self.sort_metrics(stats.flush())
        assert not metrics
Example No. 12
def init_dogstatsd(config):
    api_key = config['api_key']
    recent_point_threshold = config.get('recent_point_threshold', None)
    server_host = config['dogstatsd']['bind_host']
    dd_url = config['dd_url']
    port = config['dogstatsd']['port']
    forward_to_host = config['dogstatsd'].get('forward_host')
    forward_to_port = config['dogstatsd'].get('forward_port')
    non_local_traffic = config['dogstatsd'].get('non_local_traffic')
    so_rcvbuf = config['dogstatsd'].get('so_rcvbuf')
    utf8_decoding = config['dogstatsd'].get('utf8_decoding')

    interval = DOGSTATSD_FLUSH_INTERVAL
    aggregator_interval = DOGSTATSD_AGGREGATOR_BUCKET_SIZE

    hostname = get_hostname()

    # get proxy settings
    proxies = get_proxy()

    forwarder = Forwarder(
        api_key,
        dd_url,
        proxies=proxies,
    )
    forwarder.start()

    aggregator = MetricsBucketAggregator(
        hostname,
        aggregator_interval,
        recent_point_threshold=recent_point_threshold,
        formatter=get_formatter(config),
        histogram_aggregates=config.get('histogram_aggregates'),
        histogram_percentiles=config.get('histogram_percentiles'),
        utf8_decoding=utf8_decoding
    )
    # serializer
    serializer = Serializer(
        aggregator,
        forwarder,
    )

    reporter = Reporter(interval, aggregator, serializer, api_key,
                        use_watchdog=False, hostname=hostname)

    # NOTICE: when `non_local_traffic` is passed we need to bind to any interface on the box. The forwarder uses
    # Tornado which takes care of sockets creation (more than one socket can be used at once depending on the
    # network settings), so it's enough to just pass an empty string '' to the library.
    # In Dogstatsd we use a single, fullstack socket, so passing '' as the address doesn't work and we default to
    # '0.0.0.0'. If someone needs to bind Dogstatsd to the IPv6 '::', they need to turn off `non_local_traffic` and
    # use the '::' meta address as `bind_host`.
    if non_local_traffic:
        server_host = '0.0.0.0'

    server = Server(aggregator, server_host, port, forward_to_host=forward_to_host,
                    forward_to_port=forward_to_port, so_rcvbuf=so_rcvbuf)

    return reporter, server, forwarder
Example No. 13
def init(config_path=None,
         use_watchmonitor=False,
         use_forwarder=False,
         args=None):
    c = get_config(parse_args=False, cfg_path=config_path)

    if (not c['use_monitorstatsd']
            and (args and args[0] in ['start', 'restart'] or not args)):
        log.info("Monitorstatsd is disabled. Exiting")
        sleep(4)
        sys.exit(0)

    log.debug("Configuring monitorstatsd")

    port = c['monitorstatsd_port']
    interval = monitorSTATSD_FLUSH_INTERVAL
    api_key = c['api_key']
    aggregator_interval = monitorSTATSD_AGGREGATOR_BUCKET_SIZE
    non_local_traffic = c['non_local_traffic']
    forward_to_host = c.get('statsd_forward_host')
    forward_to_port = c.get('statsd_forward_port')
    event_chunk_size = c.get('event_chunk_size')
    recent_point_threshold = c.get('recent_point_threshold', None)
    ip = c.get('ip', "unknown")

    target = c['m_url']
    if use_forwarder:
        target = c['monitorstatsd_target']

    hostname = get_hostname(c)

    assert 0 < interval

    aggregator = MetricsBucketAggregator(
        hostname,
        aggregator_interval,
        recent_point_threshold=recent_point_threshold,
        formatter=get_formatter(c),
        histogram_aggregates=c.get('histogram_aggregates'),
        histogram_percentiles=c.get('histogram_percentiles'),
        utf8_decoding=c['utf8_decoding'])

    reporter = Reporter(c, interval, aggregator, target, api_key,
                        use_watchmonitor, event_chunk_size)

    server_host = c['bind_host']
    if non_local_traffic:
        server_host = ''

    server = Server(aggregator,
                    server_host,
                    port,
                    forward_to_host=forward_to_host,
                    forward_to_port=forward_to_port)

    return reporter, server, c
Example No. 14
    def test_sampled_counter(self):
        # Submit a sampled counter.
        stats = MetricsBucketAggregator('myhost', interval=self.interval)
        stats.submit_packets('sampled.counter:1|c|@0.5')
        self.sleep_for_interval_length()
        metrics = stats.flush()
        assert len(metrics) == 1
        m = metrics[0]
        assert m['metric'] == 'sampled.counter'
        assert m['points'][0][1] == 2
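The flushed value of 2 comes from scaling the submitted count by the inverse of the sample rate: one packet at @0.5 stands in for roughly two real increments. As arithmetic:

    # A counter sampled at rate 0.5 is scaled up by 1 / 0.5 at flush time.
    count, sample_rate = 1, 0.5
    assert count / sample_rate == 2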
Example No. 15
    def test_scientific_notation(self):
        ag_interval = 10
        stats = MetricsBucketAggregator('myhost', interval=ag_interval)

        stats.submit_packets('test.scinot:9.512901e-05|g')
        self.sleep_for_interval_length(ag_interval)

        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 1
        ts, val = metrics[0].get('points')[0]
        self.assert_almost_equal(val, 9.512901e-05)
Example No. 16
    def test_counter_buckets(self):
        ag_interval = 5
        stats = MetricsBucketAggregator('myhost', interval=ag_interval)
        self.wait_for_bucket_boundary(ag_interval)

        # Track some counters.
        stats.submit_packets("my.first.counter:%s|c" % (1 * ag_interval))
        stats.submit_packets("my.second.counter:%s|c" % (1 * ag_interval))
        stats.submit_packets("my.third.counter:%s|c" % (3 * ag_interval))
        time.sleep(ag_interval)
        stats.submit_packets("my.first.counter:%s|c" % (5 * ag_interval))

        # Want to get 2 different entries for my.first.counter in one set of metrics,
        #  so wait for the time bucket interval to pass
        self.sleep_for_interval_length(ag_interval)
        # Ensure they roll up nicely.
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 6

        first, first_b, second, second_b, third, third_b = metrics
        assert first['metric'] == 'my.first.counter'
        assert first['points'][0][1] == 1
        assert first['host'] == 'myhost'

        assert first_b['metric'] == 'my.first.counter'
        assert first_b['points'][0][1] == 5
        assert (first_b['points'][0][0] - first['points'][0][0]) == ag_interval

        assert first['points'][0][0] % ag_interval == 0
        assert first_b['points'][0][0] % ag_interval == 0

        assert second['metric'] == 'my.second.counter'
        assert second['points'][0][1] == 1
        assert second_b['metric'] == 'my.second.counter'
        assert second_b['points'][0][1] == 0

        assert third['metric'] == 'my.third.counter'
        assert third['points'][0][1] == 3
        assert third_b['metric'] == 'my.third.counter'
        assert third_b['points'][0][1] == 0

        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 0

        self.sleep_for_interval_length(ag_interval)
        # Ensure that counters reset to zero.
        metrics = self.sort_metrics(stats.flush())
        first, second, third = metrics
        assert first['metric'] == 'my.first.counter'
        assert first['points'][0][1] == 0
        assert second['metric'] == 'my.second.counter'
        assert second['points'][0][1] == 0
        assert third['metric'] == 'my.third.counter'
        assert third['points'][0][1] == 0
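The submitted values are multiples of ag_interval because the bucket aggregator reports counters as per-second rates, dividing each bucket's raw count by the interval length; that is why a raw 1 * ag_interval flushes as 1 and 5 * ag_interval as 5 (an inference from the assertions above):

    ag_interval = 5
    assert (1 * ag_interval) / float(ag_interval) == 1
    assert (5 * ag_interval) / float(ag_interval) == 5
    # Buckets with no new samples flush a rate of zero, as asserted above.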
Example No. 17
    def test_diagnostic_stats(self):
        stats = MetricsBucketAggregator('myhost', interval=self.interval)
        for i in range(10):
            stats.submit_packets('metric:10|c')
        stats.send_packet_count('datadog.dogstatsd.packet.count')

        self.sleep_for_interval_length()
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 2
        first, second = metrics

        assert first['metric'] == 'datadog.dogstatsd.packet.count'
        assert first['points'][0][1] == 10
Example No. 18
    def test_batch_submission(self):
        # Submit several metrics in one newline-delimited packet.
        stats = MetricsBucketAggregator('myhost', interval=self.interval)
        metrics = ['counter:1|c', 'counter:1|c', 'gauge:1|g']
        packet = "\n".join(metrics)
        stats.submit_packets(packet)

        self.sleep_for_interval_length()
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 2
        counter, gauge = metrics
        assert counter['points'][0][1] == 2
        assert gauge['points'][0][1] == 1
Example No. 19
    def test_gauge_sample_rate(self):
        ag_interval = self.interval
        stats = MetricsBucketAggregator('myhost', interval=ag_interval)

        # Submit a sampled gauge metric.
        stats.submit_packets('sampled.gauge:10|g|@0.1')

        # Assert that it's treated normally.
        self.sleep_for_interval_length(ag_interval)
        metrics = stats.flush()
        assert len(metrics) == 1
        m = metrics[0]
        assert m['metric'] == 'sampled.gauge'
        assert m['points'][0][1] == 10
Example No. 20
def init(config_path=None, use_watchdog=False, use_forwarder=False):
    """Configure the server and the reporting thread.
    """
    c = get_config(parse_args=False, cfg_path=config_path)
    log.debug("Configuration dogstatsd")

    port = c['dogstatsd_port']
    interval = int(c['dogstatsd_interval'])
    aggregator_interval = int(c['dogstatsd_agregator_bucket_size'])
    api_key = c['api_key']
    non_local_traffic = c['non_local_traffic']
    forward_to_host = c.get('statsd_forward_host')
    forward_to_port = c.get('statsd_forward_port')
    event_chunk_size = c.get('event_chunk_size')

    target = c['dd_url']
    if use_forwarder:
        target = c['dogstatsd_target']

    hostname = get_hostname(c)

    # Create the aggregator (which is the point of communication between the
    # server and reporting threads).
    assert 0 < interval

    aggregator = MetricsBucketAggregator(hostname,
                                         aggregator_interval,
                                         recent_point_threshold=c.get(
                                             'recent_point_threshold', None))

    # Start the reporting thread.
    reporter = Reporter(interval, aggregator, target, api_key, use_watchdog,
                        event_chunk_size)

    # Start the server on an IPv4 stack
    # Default to loopback
    server_host = c['bind_host']
    # If specified, bind to all addresses
    if non_local_traffic:
        server_host = ''

    server = Server(aggregator,
                    server_host,
                    port,
                    forward_to_host=forward_to_host,
                    forward_to_port=forward_to_port)

    return reporter, server, c
Example No. 21
    def test_sampled_histogram(self):
        # Submit a sampled histogram.
        # The min is not enabled by default
        stats = MetricsBucketAggregator(
            'myhost',
            interval=self.interval,
            histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES + ['min'])
        stats.submit_packets('sampled.hist:5|h|@0.5')

        # Assert we scale up properly.
        self.sleep_for_interval_length()
        metrics = self.sort_metrics(stats.flush())
        p95, pavg, pcount, pmax, pmed, pmin = self.sort_metrics(metrics)

        assert pcount['points'][0][1] == 2
        for p in [p95, pavg, pmed, pmax, pmin]:
            assert p['points'][0][1] == 5
Example No. 22
    def test_event_text(self):
        stats = MetricsBucketAggregator('myhost', interval=self.interval)
        stats.submit_packets('_e{2,0}:t1|')
        stats.submit_packets('_e{2,12}:t2|text|content')
        stats.submit_packets(
            '_e{2,23}:t3|First line\\nSecond line')  # \n is a newline
        stats.submit_packets(
            '_e{2,19}:t4|♬ †øU †øU ¥ºu T0µ ♪')  # utf-8 compliant

        events = self.sort_events(stats.flush_events())

        assert len(events) == 4
        first, second, third, fourth = events

        assert first['msg_text'] == ''
        assert second['msg_text'] == 'text|content'
        assert third['msg_text'] == 'First line\nSecond line'
        assert fourth['msg_text'] == '♬ †øU †øU ¥ºu T0µ ♪'
Example No. 23
    def test_histogram_normalization(self):
        ag_interval = 10
        # The min is not enabled by default
        stats = MetricsBucketAggregator(
            'myhost',
            interval=ag_interval,
            histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES + ['min'])
        for i in range(5):
            stats.submit_packets('h1:1|h')
        for i in range(20):
            stats.submit_packets('h2:1|h')

        self.sleep_for_interval_length(ag_interval)
        metrics = self.sort_metrics(stats.flush())
        _, _, h1count, _, _, _, _, _, h2count, _, _, _ = metrics

        assert h1count['points'][0][1] == 0.5
        assert h2count['points'][0][1] == 2
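The count aggregate is likewise normalized to a per-second rate: 5 samples over a 10-second bucket flush as 0.5, and 20 samples as 2. As arithmetic:

    ag_interval = 10
    assert 5 / float(ag_interval) == 0.5
    assert 20 / float(ag_interval) == 2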
Example No. 24
    def test_event_title(self):
        stats = MetricsBucketAggregator('myhost', interval=self.interval)
        stats.submit_packets('_e{0,4}:|text')
        stats.submit_packets('_e{9,4}:2intitulé|text')
        stats.submit_packets('_e{14,4}:3title content|text')
        stats.submit_packets('_e{14,4}:4title|content|text')
        stats.submit_packets(
            '_e{13,4}:5title\\ntitle|text')  # \n stays escaped

        events = self.sort_events(stats.flush_events())

        assert len(events) == 5
        first, second, third, fourth, fifth = events

        assert first['msg_title'] == ''
        assert second['msg_title'] == '2intitulé'
        assert third['msg_title'] == '3title content'
        assert fourth['msg_title'] == '4title|content'
        assert fifth['msg_title'] == '5title\\ntitle'
Example No. 25
    def test_string_sets(self):
        stats = MetricsBucketAggregator('myhost', interval=self.interval)
        stats.submit_packets('my.set:string|s')
        stats.submit_packets('my.set:sets|s')
        stats.submit_packets('my.set:sets|s')
        stats.submit_packets('my.set:test|s')
        stats.submit_packets('my.set:test|s')
        stats.submit_packets('my.set:test|s')

        # Assert that it's treated normally.
        self.sleep_for_interval_length()
        metrics = stats.flush()

        assert len(metrics) == 1
        m = metrics[0]
        assert m['metric'] == 'my.set'
        assert m['points'][0][1] == 3

        # Assert there are no more sets
        assert not stats.flush()
        self.sleep_for_interval_length()
        assert not stats.flush()
Example No. 26
    def test_empty_counter(self):
        ag_interval = self.interval
        stats = MetricsBucketAggregator('myhost', interval=ag_interval)

        self.sleep_for_interval_length(ag_interval)
        metrics = self.sort_metrics(stats.flush())
        # Should be an empty list
        assert len(metrics) == 0

        # Track some counters.
        stats.submit_packets('my.first.counter:%s|c' % (1 * ag_interval))
        # Call flush before the bucket_length has been exceeded
        metrics = self.sort_metrics(stats.flush())
        # Should be an empty list
        assert len(metrics) == 0

        self.sleep_for_interval_length(ag_interval)
        metrics = self.sort_metrics(stats.flush())
        # Should now have the data
        assert len(metrics) == 1
        assert metrics[0]['metric'] == 'my.first.counter'
        assert metrics[0]['points'][0][1] == 1
Example No. 27
    def test_gauge(self):
        ag_interval = 2
        stats = MetricsBucketAggregator('myhost', interval=ag_interval)
        self.wait_for_bucket_boundary(ag_interval)

        # Track some gauges.
        stats.submit_packets('my.first.gauge:1|g')
        stats.submit_packets('my.first.gauge:5|g')
        stats.submit_packets('my.second.gauge:1.5|g')

        # Ensure that gauges roll up correctly.
        self.sleep_for_interval_length(ag_interval)
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 2

        first, second = metrics

        assert first['metric'] == 'my.first.gauge'
        assert first['points'][0][1] == 5
        assert first['host'] == 'myhost'

        assert second['metric'] == 'my.second.gauge'
        assert second['points'][0][1] == 1.5

        # Ensure that old gauges get dropped due to old timestamps
        stats.submit_metric('my.first.gauge', 5, 'g')
        stats.submit_metric('my.first.gauge', 1, 'g', timestamp=1000000000)
        stats.submit_metric('my.second.gauge', 20, 'g', timestamp=1000000000)

        self.sleep_for_interval_length(ag_interval)
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 1

        first = metrics[0]

        assert first['metric'] == 'my.first.gauge'
        assert first['points'][0][1] == 5
        assert first['host'] == 'myhost'
Example No. 28
def init(server_host,
         port,
         timeout=UDP_SOCKET_TIMEOUT,
         aggregator_interval=DOGSTATSD_AGGREGATOR_BUCKET_SIZE):
    """Configure the server and the reporting thread.
    """

    log.debug("Configuring dogstatsd")

    hostname = None

    aggregator = MetricsBucketAggregator(
        hostname,
        aggregator_interval,
        recent_point_threshold=None,
        formatter=None,
        histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES,
        histogram_percentiles=DEFAULT_HISTOGRAM_PERCENTILES,
        utf8_decoding=True,
    )

    server = Server(aggregator, server_host, port, timeout=timeout)

    return server
Example No. 29
    def test_dogstatsd_ascii_events(self):
        ma = MetricsBucketAggregator('my.host')

        for _ in xrange(self.FLUSH_COUNT):
            for i in xrange(self.LOOPS_PER_FLUSH):
                for j in xrange(self.METRIC_COUNT):

                    ma.submit_packets(
                        self.create_event_packet(
                            'asldkfj fdsaljfas dflksjafs fasdfkjaldsfkjasldf',
                            """alkdjfa slfalskdfjas lkfdjaoisudhfalsdkjbfaksdhfbasjdk fa;sf ljda fsafksadfh alsdjfhaskjdfgahls d;fjasdlkfh9823udjs dlfhaspdf98as ufdaksjhfaisdhufalskdjfhas df"""
                        ))
                    ma.submit_packets(
                        self.create_event_packet(
                            'kdjfsofuousodifu982309rijdfsljsd  dfsdf sdf',
                            """dflskjdfs8d9fsdfjs sldfjka ;dlfjapfoia jsdflakjsdfp 0adsfuolwejf wflsdjf lsdkjf0saoiufja dlfjasd of;lasdjf ;askdjf asodfhas lkmfbashudf asd,fasdfna s,dfjas lcjx vjaskdlfjals dfkjasdflk jasldfkj asldkfjas ldfkasjdf a"""
                        ))
                    ma.submit_packets(
                        self.create_event_packet(
                            'asdf askdjf asldfkjsad lfkajsdlfksajd fasdfsdfdf',
                            """skdfjsld flskdjf alksdjfpasdofuapo sdfjalksdjf ;as.kjdf ;ljLKJL :KJL:KJ l;kdsjf ;lkj :Lkj FLDKFJ LSKFDJ ;LDFJ SLKDJF KSDLjf: Lfjldkj fLKDSJf lSKDjf ls;kdjf s;lkfjs L:KAJ :LFKJDL:DKjf L:SKjf;lKDJfl;SKJDf :LKSDj;lsdfj fsdljfsd ofisunafoialjsflmsdifjas;dlkfaj sdfkasjd flaksjdfnpmsao;difjkas dfnlaksdfa;sodljfas lfdjasdflmajsdlfknaf98wouanepr9qo3ud fadspuf oaisdufpoasid fj askdjn LKJH LKJHFL KJDHSF DSFLHSL JKDFHLSK DJFHLS KJDFHS"""
                        ))

            ma.flush()
Example No. 30
    def test_dogstatsd_utf8_events(self):
        ma = MetricsBucketAggregator('my.host')

        for _ in xrange(self.FLUSH_COUNT):
            for i in xrange(self.LOOPS_PER_FLUSH):
                for j in xrange(self.METRIC_COUNT):

                    ma.submit_packets(
                        self.create_event_packet(
                            'Τη γλώσσα μου έδωσαν ελληνική',
                            """τὸ σπίτι φτωχικὸ στὶς ἀμμουδιὲς τοῦ Ὁμήρου. Μονάχη ἔγνοια ἡ γλῶσσα μου στὶς ἀμμουδιὲς τοῦ Ὁμήρου. ἀπὸ τὸ Ἄξιον ἐστί τοῦ Ὀδυσσέα Ἐλύτη"""
                        ))
                    ma.submit_packets(
                        self.create_event_packet(
                            'ვეპხის ტყაოსანი შოთა რუსთაველი',
                            """ღმერთსი შემვედრე, ნუთუ კვლა დამხსნას სოფლისა შრომასა, ცეცხლს, წყალსა და მიწასა, ჰაერთა თანა მრომასა; მომცნეს ფრთენი და აღვფრინდე, მივჰხვდე მას ჩემსა ნდომასა, დღისით და ღამით ვჰხედვიდე მზისა ელვათა კრთომაასა.
                        """))
                    ma.submit_packets(
                        self.create_event_packet(
                            'Traité sur la tolérance',
                            """Ose supposer qu'un Ministre éclairé & magnanime, un Prélat humain & sage, un Prince qui sait que son intérêt consiste dans le grand nombre de ses Sujets, & sa gloire dans leur bonheur, daigne jetter les yeux sur cet Ecrit informe & défectueux; il y supplée par ses propres lumieres; il se dit à lui-même: Que risquerai-je à voir la terre cultivée & ornée par plus de mains laborieuses, les tributs augmentés, l'Etat plus florissant?"""
                        ))

            ma.flush()