Example 1
    def test_gauge_flush_during_bucket(self):
        # Tests returning data when flush is called in the middle of a time bucket that has data
        ag_interval = self.interval
        stats = MetricsBucketAggregator('myhost', interval=ag_interval)
        self.wait_for_bucket_boundary(ag_interval)

        # Track some gauges.
        stats.submit_packets('my.first.gauge:1|g')
        stats.submit_packets('my.first.gauge:5|g')
        stats.submit_packets('my.second.gauge:1.5|g')
        self.sleep_for_interval_length(ag_interval)
        stats.submit_packets('my.second.gauge:9.5|g')

        # Ensure that gauges roll up correctly.
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 2

        first, second = metrics

        assert first['metric'] == 'my.first.gauge'
        assert first['points'][0][1] == 5
        assert first['host'] == 'myhost'

        assert second['metric'] == 'my.second.gauge'
        assert second['points'][0][1] == 1.5

        self.sleep_for_interval_length(ag_interval)
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 1

        second = metrics[0]
        assert second['metric'] == 'my.second.gauge'
        assert second['points'][0][1] == 9.5
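These tests lean on two timing helpers from their harness. A minimal sketch of what they plausibly do (hypothetical implementations; the real harness may differ):

import time

BUCKET_BOUNDARY_TOLERANCE = 0.05  # assumed slack, in seconds

def wait_for_bucket_boundary(interval):
    # Sleep until wall-clock time crosses the next multiple of `interval`,
    # so packets submitted afterwards land at the start of a fresh bucket.
    time.sleep(interval - (time.time() % interval))

def sleep_for_interval_length(interval):
    # Sleep slightly longer than one bucket so the current bucket is
    # guaranteed to have closed before the next flush.
    time.sleep(interval + BUCKET_BOUNDARY_TOLERANCE)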
Example 2
    def test_gauge_buckets(self):
        # Tests returning data from two time buckets
        ag_interval = self.interval
        stats = MetricsBucketAggregator('myhost', interval=ag_interval)
        self.wait_for_bucket_boundary(ag_interval)

        # Track some gauges.
        stats.submit_packets('my.first.gauge:1|g')
        stats.submit_packets('my.first.gauge:5|g')
        stats.submit_packets('my.second.gauge:1.5|g')
        self.sleep_for_interval_length(ag_interval)
        stats.submit_packets('my.second.gauge:9.5|g')

        # Ensure that gauges roll up correctly.
        self.sleep_for_interval_length(ag_interval)
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 3

        first, second, second_b = metrics

        assert first['metric'] == 'my.first.gauge'
        assert first['points'][0][1] == 5
        assert first['host'] == 'myhost'

        assert second_b['metric'] == 'my.second.gauge'
        assert second_b['points'][0][1] == 9.5

        assert second['metric'] == 'my.second.gauge'
        assert second['points'][0][1] == 1.5

        # Check that subsequent flushes come back empty
        self.sleep_for_interval_length(ag_interval)
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 0
Example 3
    def test_sampled_counter(self):
        # Submit a sampled counter.
        stats = MetricsBucketAggregator('myhost', interval=self.interval)
        stats.submit_packets('sampled.counter:1|c|@0.5')
        self.sleep_for_interval_length()
        metrics = stats.flush()
        assert len(metrics) == 1
        m = metrics[0]
        assert m['metric'] == 'sampled.counter'
        assert m['points'][0][1] == 2
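Sample rates scale counters back up: a packet tagged @0.5 stands in for two real increments. A sketch of the arithmetic behind the flushed value (illustrative helper, not the aggregator's API):

def scale_sampled_count(value, sample_rate):
    # One increment observed at sample rate 0.5 represents 1 / 0.5 = 2 events.
    return value / sample_rate

assert scale_sampled_count(1, 0.5) == 2  # matches the flushed point above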
Example 4
    def test_histogram_flush_during_bucket(self):
        ag_interval = 1
        # The min is not enabled by default
        stats = MetricsBucketAggregator(
            'myhost',
            interval=ag_interval,
            histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES + ['min'])

        # Sample every number in 0-99 many times, so each percentile
        # should land close to its own rank.
        self.wait_for_bucket_boundary(ag_interval)
        percentiles = list(range(100))
        random.shuffle(percentiles)  # in place
        for i in percentiles:
            for j in range(20):
                for type_ in ['h', 'ms']:
                    m = 'my.p:%s|%s' % (i, type_)
                    stats.submit_packets(m)

        time.sleep(
            self.BUCKET_BOUNDARY_TOLERANCE
        )  # Make sure that we'll wait for the _next_ bucket boundary
        self.wait_for_bucket_boundary(ag_interval)
        percentiles = list(range(50))
        random.shuffle(percentiles)  # in place
        for i in percentiles:
            for j in range(20):
                for type_ in ['h', 'ms']:
                    m = 'my.p:%s|%s' % (i, type_)
                    stats.submit_packets(m)

        metrics = self.sort_metrics(stats.flush())

        assert len(metrics) == 6
        p95, pavg, pcount, pmax, pmed, pmin = self.sort_metrics(metrics)
        assert p95['metric'] == 'my.p.95percentile'
        self.assert_almost_equal(p95['points'][0][1], 95, 10)
        self.assert_almost_equal(pmax['points'][0][1], 99, 1)
        self.assert_almost_equal(pmed['points'][0][1], 50, 2)
        self.assert_almost_equal(pavg['points'][0][1], 50, 2)
        self.assert_almost_equal(pmin['points'][0][1], 1, 1)
        assert pcount['points'][0][1] == 4000  # 100 * 20 * 2
        assert p95['host'] == 'myhost'

        self.sleep_for_interval_length()
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 6
        p95_b, pavg_b, pcount_b, pmax_b, pmed_b, pmin_b = self.sort_metrics(
            metrics)
        assert p95_b['metric'] == 'my.p.95percentile'
        self.assert_almost_equal(p95_b['points'][0][1], 47, 10)
        self.assert_almost_equal(pmax_b['points'][0][1], 49, 1)
        self.assert_almost_equal(pmed_b['points'][0][1], 25, 2)
        self.assert_almost_equal(pavg_b['points'][0][1], 25, 2)
        self.assert_almost_equal(pmin_b['points'][0][1], 1, 1)
        assert pcount_b['points'][0][1] == 2000  # 50 * 20 * 2

        # Ensure that histograms are reset.
        metrics = self.sort_metrics(stats.flush())
        assert not metrics
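The six flushed series per histogram are 95percentile, avg, count, max, median and min. A rough sketch of how they derive from the raw samples (illustrative; the real aggregator's percentile convention may differ slightly at the edges):

def histogram_aggregates(samples):
    ordered = sorted(samples)
    n = len(ordered)
    return {
        '95percentile': ordered[max(int(0.95 * n) - 1, 0)],
        'avg': sum(ordered) / float(n),
        'count': n,  # normalized by the bucket interval at flush time
        'max': ordered[-1],
        'median': ordered[n // 2],
        'min': ordered[0],
    }

aggs = histogram_aggregates([v for v in range(100) for _ in range(40)])
assert aggs['count'] == 4000 and aggs['max'] == 99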
Example 5
    def test_scientific_notation(self):
        ag_interval = 10
        stats = MetricsBucketAggregator('myhost', interval=ag_interval)

        stats.submit_packets('test.scinot:9.512901e-05|g')
        self.sleep_for_interval_length(ag_interval)

        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 1
        ts, val = metrics[0].get('points')[0]
        self.assert_almost_equal(val, 9.512901e-05)
Example 6
    def test_batch_submission(self):
        # Submit several metrics in one batched packet.
        stats = MetricsBucketAggregator('myhost', interval=self.interval)
        metrics = ['counter:1|c', 'counter:1|c', 'gauge:1|g']
        packet = "\n".join(metrics)
        stats.submit_packets(packet)

        self.sleep_for_interval_length()
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 2
        counter, gauge = metrics
        assert counter['points'][0][1] == 2
        assert gauge['points'][0][1] == 1
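Batched payloads are newline-delimited, so the aggregator can treat each line as one packet. A sketch of the assumed splitting step:

packet = "counter:1|c\ncounter:1|c\ngauge:1|g"
lines = packet.split("\n")
assert len(lines) == 3 and lines[-1] == 'gauge:1|g'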
Example 7
    def test_gauge_sample_rate(self):
        ag_interval = self.interval
        stats = MetricsBucketAggregator('myhost', interval=ag_interval)

        # Submit a sampled gauge metric.
        stats.submit_packets('sampled.gauge:10|g|@0.1')

        # Assert that it's treated normally.
        self.sleep_for_interval_length(ag_interval)
        metrics = stats.flush()
        assert len(metrics) == 1
        m = metrics[0]
        assert m['metric'] == 'sampled.gauge'
        assert m['points'][0][1] == 10
Example 8
def init(config_path=None, use_watchdog=False, use_forwarder=False, args=None):
    """Configure the server and the reporting thread.
    """
    c = get_config(parse_args=False, cfg_path=config_path)

    if not c['use_dogstatsd'] and \
        (args and args[0] in ['start', 'restart'] or not args):
        log.info("Dogstatsd is disabled. Exiting")
        # We're exiting purposefully, so exit with zero (supervisor's expected
        # code). HACK: Sleep a little bit so supervisor thinks we've started cleanly
        # and thus can exit cleanly.
        sleep(4)
        sys.exit(0)

    log.debug("Configurating     dogstatsd")

    port = c['dogstatsd_port']
    interval = DOGSTATSD_FLUSH_INTERVAL
    api_key = c['api_key']
    aggregator_interval = DOGSTATSD_AGGREGATOR_BUCKET_SIZE
    non_local_traffic = c['non_local_traffic']
    forward_to_host = c.get('statsd_forward_host')
    forward_to_port = c.get('statsd_forward_port')
    event_chunk_size = c.get('event_chunk_size')
    recent_point_threshold = c.get('recent_point_threshold', None)

    target = c['dd_url']
    if use_forwarder:
        target = c['dogstatsd_target']

    hostname = get_hostname(c)

    # Create the aggregator (which is the point of communication between the
    # server and reporting threads).
    assert 0 < interval

    aggregator = MetricsBucketAggregator(
        hostname,
        aggregator_interval,
        recent_point_threshold=recent_point_threshold,
        formatter=get_formatter(c))

    # Start the reporting thread.
    reporter = Reporter(interval, aggregator, target, api_key, use_watchdog,
                        event_chunk_size)

    # Start the server on an IPv4 stack
    # Default to loopback
    server_host = c['bind_host']
    # If specified, bind to all addresses
    if non_local_traffic:
        server_host = ''

    server = Server(aggregator,
                    server_host,
                    port,
                    forward_to_host=forward_to_host,
                    forward_to_port=forward_to_port)

    return reporter, server, c
Example 9
def init5(agent_config=None, use_watchdog=False, use_forwarder=False, args=None):
    """Configure the server and the reporting thread.
    """
    if (not agent_config['use_dogstatsd'] and
            (args and args[0] in ['start', 'restart'] or not args)):
        log.info("StsStatsd is disabled. Exiting")
        # We're exiting purposefully, so exit with zero (supervisor's expected
        # code). HACK: Sleep a little bit so supervisor thinks we've started cleanly
        # and thus can exit cleanly.
        sleep(4)
        sys.exit(0)

    port = agent_config['dogstatsd_port']
    interval = DOGSTATSD_FLUSH_INTERVAL
    api_key = agent_config['api_key']
    aggregator_interval = DOGSTATSD_AGGREGATOR_BUCKET_SIZE
    non_local_traffic = agent_config['non_local_traffic']
    forward_to_host = agent_config.get('statsd_forward_host')
    forward_to_port = agent_config.get('statsd_forward_port')
    event_chunk_size = agent_config.get('event_chunk_size')
    recent_point_threshold = agent_config.get('recent_point_threshold', None)
    so_rcvbuf = agent_config.get('statsd_so_rcvbuf', None)
    server_host = agent_config['bind_host']

    target = agent_config['dd_url']
    if use_forwarder:
        target = agent_config['dogstatsd_target']

    hostname = get_hostname(agent_config)
    log.debug("Using hostname \"%s\"", hostname)

    # Create the aggregator (which is the point of communication between the
    # server and reporting threads).
    assert 0 < interval

    aggregator = MetricsBucketAggregator(
        hostname,
        aggregator_interval,
        recent_point_threshold=recent_point_threshold,
        formatter=get_formatter(agent_config),
        histogram_aggregates=agent_config.get('histogram_aggregates'),
        histogram_percentiles=agent_config.get('histogram_percentiles'),
        utf8_decoding=agent_config['utf8_decoding']
    )

    # Start the reporting thread.
    reporter = Reporter(interval, aggregator, target, api_key, use_watchdog, event_chunk_size)

    # NOTICE: when `non_local_traffic` is passed we need to bind to any interface on the box. The forwarder uses
    # Tornado which takes care of sockets creation (more than one socket can be used at once depending on the
    # network settings), so it's enough to just pass an empty string '' to the library.
    # In Dogstatsd we use a single, fullstack socket, so passing '' as the address doesn't work and we default to
    # '0.0.0.0'. If someone needs to bind Dogstatsd to the IPv6 '::', they need to turn off `non_local_traffic` and
    # use the '::' meta address as `bind_host`.
    if non_local_traffic:
        server_host = '0.0.0.0'

    server = Server(aggregator, server_host, port, forward_to_host=forward_to_host, forward_to_port=forward_to_port, so_rcvbuf=so_rcvbuf)

    return reporter, server
Example 10
    def test_sampled_histogram(self):
        # Submit a sampled histogram.
        # The min is not enabled by default
        stats = MetricsBucketAggregator(
            'myhost',
            interval=self.interval,
            histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES + ['min'])
        stats.submit_packets('sampled.hist:5|h|@0.5')

        # Assert we scale up properly.
        self.sleep_for_interval_length()
        metrics = self.sort_metrics(stats.flush())
        p95, pavg, pcount, pmax, pmed, pmin = self.sort_metrics(metrics)

        assert pcount['points'][0][1] == 2
        for p in [p95, pavg, pmed, pmax, pmin]:
            assert p['points'][0][1] == 5
Example 11
def init_dogstatsd(config):
    api_key = config['api_key']
    recent_point_threshold = config.get('recent_point_threshold', None)
    server_host = config['dogstatsd']['bind_host']
    dd_url = config['dd_url']
    port = config['dogstatsd']['port']
    forward_to_host = config['dogstatsd'].get('forward_host')
    forward_to_port = config['dogstatsd'].get('forward_port')
    non_local_traffic = config['dogstatsd'].get('non_local_traffic')
    so_rcvbuf = config['dogstatsd'].get('so_rcvbuf')
    utf8_decoding = config['dogstatsd'].get('utf8_decoding')

    interval = DOGSTATSD_FLUSH_INTERVAL
    aggregator_interval = DOGSTATSD_AGGREGATOR_BUCKET_SIZE

    hostname = get_hostname()

    # get proxy settings
    proxies = get_proxy()

    forwarder = Forwarder(
        api_key,
        dd_url,
        proxies=proxies,
    )
    forwarder.start()

    aggregator = MetricsBucketAggregator(
        hostname,
        aggregator_interval,
        recent_point_threshold=recent_point_threshold,
        formatter=get_formatter(config),
        histogram_aggregates=config.get('histogram_aggregates'),
        histogram_percentiles=config.get('histogram_percentiles'),
        utf8_decoding=utf8_decoding
    )
    # serializer
    serializer = Serializer(
        aggregator,
        forwarder,
    )

    reporter = Reporter(interval, aggregator, serializer, api_key,
                        use_watchdog=False, hostname=hostname)

    # NOTICE: when `non_local_traffic` is passed we need to bind to any interface on the box. The forwarder uses
    # Tornado which takes care of sockets creation (more than one socket can be used at once depending on the
    # network settings), so it's enough to just pass an empty string '' to the library.
    # In Dogstatsd we use a single, fullstack socket, so passing '' as the address doesn't work and we default to
    # '0.0.0.0'. If someone needs to bind Dogstatsd to the IPv6 '::', they need to turn off `non_local_traffic` and
    # use the '::' meta address as `bind_host`.
    if non_local_traffic:
        server_host = '0.0.0.0'

    server = Server(aggregator, server_host, port, forward_to_host=forward_to_host,
                    forward_to_port=forward_to_port, so_rcvbuf=so_rcvbuf)

    return reporter, server, forwarder
Example 12
def init(config_path=None,
         use_watchmonitor=False,
         use_forwarder=False,
         args=None):
    c = get_config(parse_args=False, cfg_path=config_path)

    if (not c['use_monitorstatsd']
            and (args and args[0] in ['start', 'restart'] or not args)):
        log.info("Monitorstatsd is disabled. Exiting")
        sleep(4)
        sys.exit(0)

    log.debug("Configuring monitorstatsd")

    port = c['monitorstatsd_port']
    interval = monitorSTATSD_FLUSH_INTERVAL
    api_key = c['api_key']
    aggregator_interval = monitorSTATSD_AGGREGATOR_BUCKET_SIZE
    non_local_traffic = c['non_local_traffic']
    forward_to_host = c.get('statsd_forward_host')
    forward_to_port = c.get('statsd_forward_port')
    event_chunk_size = c.get('event_chunk_size')
    recent_point_threshold = c.get('recent_point_threshold', None)
    ip = c.get('ip', "unknown")

    target = c['m_url']
    if use_forwarder:
        target = c['monitorstatsd_target']

    hostname = get_hostname(c)

    assert 0 < interval

    aggregator = MetricsBucketAggregator(
        hostname,
        aggregator_interval,
        recent_point_threshold=recent_point_threshold,
        formatter=get_formatter(c),
        histogram_aggregates=c.get('histogram_aggregates'),
        histogram_percentiles=c.get('histogram_percentiles'),
        utf8_decoding=c['utf8_decoding'])

    reporter = Reporter(c, interval, aggregator, target, api_key,
                        use_watchmonitor, event_chunk_size)

    server_host = c['bind_host']
    if non_local_traffic:
        server_host = ''

    server = Server(aggregator,
                    server_host,
                    port,
                    forward_to_host=forward_to_host,
                    forward_to_port=forward_to_port)

    return reporter, server, c
Example 13
    def test_sets(self):
        stats = MetricsBucketAggregator('myhost', interval=self.interval)
        stats.submit_packets('my.set:10|s')
        stats.submit_packets('my.set:20|s')
        stats.submit_packets('my.set:20|s')
        stats.submit_packets('my.set:30|s')
        stats.submit_packets('my.set:30|s')
        stats.submit_packets('my.set:30|s')

        # Assert that it's treated normally.
        self.sleep_for_interval_length()
        metrics = stats.flush()

        assert len(metrics) == 1
        m = metrics[0]
        assert m['metric'] == 'my.set'
        assert m['points'][0][1] == 3

        # Assert there are no more sets
        assert not stats.flush()
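Sets flush the count of distinct values seen during the bucket, which is why six packets collapse to a single point of 3. The equivalent reduction, as a sketch:

unique_values = {10, 20, 20, 30, 30, 30}
assert len(unique_values) == 3  # matches the flushed point above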
Example 14
    def test_calculate_bucket_start(self):
        stats = MetricsBucketAggregator('myhost', interval=10)
        assert stats.calculate_bucket_start(13284283) == 13284280
        assert stats.calculate_bucket_start(13284280) == 13284280
        stats = MetricsBucketAggregator('myhost', interval=5)
        assert stats.calculate_bucket_start(13284287) == 13284285
        assert stats.calculate_bucket_start(13284280) == 13284280
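The arithmetic under test is plain floor-alignment of a timestamp to the bucket interval. A standalone sketch of the same behavior (free function for illustration; the real method lives on the aggregator):

def calculate_bucket_start(timestamp, interval):
    # Align a POSIX timestamp down to the start of its bucket.
    return int(timestamp - (timestamp % interval))

assert calculate_bucket_start(13284283, 10) == 13284280
assert calculate_bucket_start(13284287, 5) == 13284285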
Example 15
    def test_diagnostic_stats(self):
        stats = MetricsBucketAggregator('myhost', interval=self.interval)
        for i in range(10):
            stats.submit_packets('metric:10|c')
        stats.send_packet_count('datadog.dogstatsd.packet.count')

        self.sleep_for_interval_length()
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 2
        first, second = metrics

        assert first['metric'] == 'datadog.dogstatsd.packet.count'
        assert first['points'][0][1] == 10
Example 16
    def test_dogstatsd_utf8_events(self):
        ma = MetricsBucketAggregator('my.host')

        for _ in xrange(self.FLUSH_COUNT):
            for i in xrange(self.LOOPS_PER_FLUSH):
                for j in xrange(self.METRIC_COUNT):

                    ma.submit_packets(
                        self.create_event_packet(
                            'Τη γλώσσα μου έδωσαν ελληνική',
                            """τὸ σπίτι φτωχικὸ στὶς ἀμμουδιὲς τοῦ Ὁμήρου. Μονάχη ἔγνοια ἡ γλῶσσα μου στὶς ἀμμουδιὲς τοῦ Ὁμήρου. ἀπὸ τὸ Ἄξιον ἐστί τοῦ Ὀδυσσέα Ἐλύτη"""
                        ))
                    ma.submit_packets(
                        self.create_event_packet(
                            'ვეპხის ტყაოსანი შოთა რუსთაველი',
                            """ღმერთსი შემვედრე, ნუთუ კვლა დამხსნას სოფლისა შრომასა, ცეცხლს, წყალსა და მიწასა, ჰაერთა თანა მრომასა; მომცნეს ფრთენი და აღვფრინდე, მივჰხვდე მას ჩემსა ნდომასა, დღისით და ღამით ვჰხედვიდე მზისა ელვათა კრთომაასა.
                        """))
                    ma.submit_packets(
                        self.create_event_packet(
                            'Traité sur la tolérance',
                            """Ose supposer qu'un Ministre éclairé & magnanime, un Prélat humain & sage, un Prince qui sait que son intérêt consiste dans le grand nombre de ses Sujets, & sa gloire dans leur bonheur, daigne jetter les yeux sur cet Ecrit informe & défectueux; il y supplée par ses propres lumieres; il se dit à lui-même: Que risquerai-je à voir la terre cultivée & ornée par plus de mains laborieuses, les tributs augmentés, l'Etat plus florissant?"""
                        ))

            ma.flush()
Example 17
    def test_dogstatsd_ascii_events(self):
        ma = MetricsBucketAggregator('my.host')

        for _ in xrange(self.FLUSH_COUNT):
            for i in xrange(self.LOOPS_PER_FLUSH):
                for j in xrange(self.METRIC_COUNT):

                    ma.submit_packets(
                        self.create_event_packet(
                            'asldkfj fdsaljfas dflksjafs fasdfkjaldsfkjasldf',
                            """alkdjfa slfalskdfjas lkfdjaoisudhfalsdkjbfaksdhfbasjdk fa;sf ljda fsafksadfh alsdjfhaskjdfgahls d;fjasdlkfh9823udjs dlfhaspdf98as ufdaksjhfaisdhufalskdjfhas df"""
                        ))
                    ma.submit_packets(
                        self.create_event_packet(
                            'kdjfsofuousodifu982309rijdfsljsd  dfsdf sdf',
                            """dflskjdfs8d9fsdfjs sldfjka ;dlfjapfoia jsdflakjsdfp 0adsfuolwejf wflsdjf lsdkjf0saoiufja dlfjasd of;lasdjf ;askdjf asodfhas lkmfbashudf asd,fasdfna s,dfjas lcjx vjaskdlfjals dfkjasdflk jasldfkj asldkfjas ldfkasjdf a"""
                        ))
                    ma.submit_packets(
                        self.create_event_packet(
                            'asdf askdjf asldfkjsad lfkajsdlfksajd fasdfsdfdf',
                            """skdfjsld flskdjf alksdjfpasdofuapo sdfjalksdjf ;as.kjdf ;ljLKJL :KJL:KJ l;kdsjf ;lkj :Lkj FLDKFJ LSKFDJ ;LDFJ SLKDJF KSDLjf: Lfjldkj fLKDSJf lSKDjf ls;kdjf s;lkfjs L:KAJ :LFKJDL:DKjf L:SKjf;lKDJfl;SKJDf :LKSDj;lsdfj fsdljfsd ofisunafoialjsflmsdifjas;dlkfaj sdfkasjd flaksjdfnpmsao;difjkas dfnlaksdfa;sodljfas lfdjasdflmajsdlfknaf98wouanepr9qo3ud fadspuf oaisdufpoasid fj askdjn LKJH LKJHFL KJDHSF DSFLHSL JKDFHLSK DJFHLS KJDFHS"""
                        ))

            ma.flush()
Example 18
    def test_bad_packets_throw_errors(self):
        packets = [
            'missing.value.and.type',
            'missing.type:2',
            'missing.value|c',
            '2|c',
            'unknown.type:2|z',
            'string.value:abc|c',
            'string.sample.rate:0|c|@abc',
            # Bad event-like packets
            '_ev{1,2}:bad_header',
            '_e{1,}:invalid|headers',
            '_e:missing|size|headers',
            '_e:{1,1}:t|t|t:bad_meta|h',
        ]

        stats = MetricsBucketAggregator('myhost', interval=self.interval)
        for packet in packets:
            try:
                stats.submit_packets(packet)
            except Exception:
                assert True
            else:
                assert False, 'packet should have raised an error: %s' % packet
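Each rejection above follows from the 'name:value|type[|@rate][|#tags]' grammar. A minimal metric-packet parser that fails the same way (illustrative only; event packets and many edge cases are out of scope for this sketch):

def parse_metric_packet(packet):
    name_and_value, _, rest = packet.partition('|')
    name, sep, raw_value = name_and_value.partition(':')
    if not sep or not rest:
        raise ValueError('bad packet: %s' % packet)
    fields = rest.split('|')
    if fields[0] not in ('c', 'g', 'h', 'ms', 's'):
        raise ValueError('unknown type: %s' % packet)
    value = float(raw_value)  # raises on non-numeric values like 'abc'
    sample_rate, tags = 1.0, None
    for field in fields[1:]:
        if field.startswith('@'):
            sample_rate = float(field[1:])  # raises on '@abc'
        elif field.startswith('#'):
            tags = field[1:].split(',')
    return name, value, fields[0], sample_rate, tags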
Example 19
    def test_counter(self):
        ag_interval = 1.0
        stats = MetricsBucketAggregator('myhost', interval=ag_interval)

        # Track some counters.
        stats.submit_packets('my.first.counter:1|c')
        stats.submit_packets('my.first.counter:5|c')
        stats.submit_packets('my.second.counter:1|c')
        stats.submit_packets('my.third.counter:3|c')

        # Ensure they roll up nicely.
        self.sleep_for_interval_length(ag_interval)
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 3

        first, second, third = metrics
        assert first['metric'] == 'my.first.counter'
        assert first['points'][0][1] == 6
        assert first['host'] == 'myhost'

        assert second['metric'] == 'my.second.counter'
        assert second['points'][0][1] == 1

        assert third['metric'] == 'my.third.counter'
        assert third['points'][0][1] == 3

        self.sleep_for_interval_length(ag_interval)
        # Ensure that counters reset to zero.
        metrics = self.sort_metrics(stats.flush())
        first, second, third = metrics
        assert first['metric'] == 'my.first.counter'
        assert first['points'][0][1] == 0
        assert second['metric'] == 'my.second.counter'
        assert second['points'][0][1] == 0
        assert third['metric'] == 'my.third.counter'
        assert third['points'][0][1] == 0
Example 20
    def test_sdstatsd_ascii_events(self):
        ma = MetricsBucketAggregator('my.host')

        for _ in xrange(self.FLUSH_COUNT):
            for i in xrange(self.LOOPS_PER_FLUSH):
                for j in xrange(self.METRIC_COUNT):

                    ma.submit_packets(self.create_event_packet(
                        'asldkfj fdsaljfas dflksjafs fasdfkjaldsfkjasldf',
                        """alkdjfa slfalskdfjas lkfdjaoisudhfalsdkjbfaksdhfbasjdk fa;sf ljda fsafksadfh alsdjfhaskjdfgahls d;fjasdlkfh9823udjs dlfhaspdf98as ufdaksjhfaisdhufalskdjfhas df"""
                    ))
                    ma.submit_packets(self.create_event_packet(
                        'kdjfsofuousodifu982309rijdfsljsd  dfsdf sdf',
                        """dflskjdfs8d9fsdfjs sldfjka ;dlfjapfoia jsdflakjsdfp 0adsfuolwejf wflsdjf lsdkjf0saoiufja dlfjasd of;lasdjf ;askdjf asodfhas lkmfbashudf asd,fasdfna s,dfjas lcjx vjaskdlfjals dfkjasdflk jasldfkj asldkfjas ldfkasjdf a"""
                    ))
                    ma.submit_packets(self.create_event_packet(
                        'asdf askdjf asldfkjsad lfkajsdlfksajd fasdfsdfdf',
                        """skdfjsld flskdjf alksdjfpasdofuapo sdfjalksdjf ;as.kjdf ;ljLKJL :KJL:KJ l;kdsjf ;lkj :Lkj FLDKFJ LSKFDJ ;LDFJ SLKDJF KSDLjf: Lfjldkj fLKDSJf lSKDjf ls;kdjf s;lkfjs L:KAJ :LFKJDL:DKjf L:SKjf;lKDJfl;SKJDf :LKSDj;lsdfj fsdljfsd ofisunafoialjsflmsdifjas;dlkfaj sdfkasjd flaksjdfnpmsao;difjkas dfnlaksdfa;sodljfas lfdjasdflmajsdlfknaf98wouanepr9qo3ud fadspuf oaisdufpoasid fj askdjn LKJH LKJHFL KJDHSF DSFLHSL JKDFHLSK DJFHLS KJDFHS"""
                    ))

            ma.flush()
Example 21
def init(config_path=None, use_watchdog=False, use_forwarder=False):
    """Configure the server and the reporting thread.
    """
    c = get_config(parse_args=False, cfg_path=config_path)
    log.debug("Configuration dogstatsd")

    port = c['dogstatsd_port']
    interval = int(c['dogstatsd_interval'])
    aggregator_interval = int(c['dogstatsd_agregator_bucket_size'])
    api_key = c['api_key']
    non_local_traffic = c['non_local_traffic']
    forward_to_host = c.get('statsd_forward_host')
    forward_to_port = c.get('statsd_forward_port')
    event_chunk_size = c.get('event_chunk_size')

    target = c['dd_url']
    if use_forwarder:
        target = c['dogstatsd_target']

    hostname = get_hostname(c)

    # Create the aggregator (which is the point of communication between the
    # server and reporting threads).
    assert 0 < interval

    aggregator = MetricsBucketAggregator(hostname,
                                         aggregator_interval,
                                         recent_point_threshold=c.get(
                                             'recent_point_threshold', None))

    # Start the reporting thread.
    reporter = Reporter(interval, aggregator, target, api_key, use_watchdog,
                        event_chunk_size)

    # Start the server on an IPv4 stack
    # Default to loopback
    server_host = c['bind_host']
    # If specified, bind to all addresses
    if non_local_traffic:
        server_host = ''

    server = Server(aggregator,
                    server_host,
                    port,
                    forward_to_host=forward_to_host,
                    forward_to_port=forward_to_port)

    return reporter, server, c
Example 22
    def test_sdstatsd_utf8_events(self):
        ma = MetricsBucketAggregator('my.host')

        for _ in xrange(self.FLUSH_COUNT):
            for i in xrange(self.LOOPS_PER_FLUSH):
                for j in xrange(self.METRIC_COUNT):

                    ma.submit_packets(self.create_event_packet(
                        'Τη γλώσσα μου έδωσαν ελληνική',
                        """τὸ σπίτι φτωχικὸ στὶς ἀμμουδιὲς τοῦ Ὁμήρου. Μονάχη ἔγνοια ἡ γλῶσσα μου στὶς ἀμμουδιὲς τοῦ Ὁμήρου. ἀπὸ τὸ Ἄξιον ἐστί τοῦ Ὀδυσσέα Ἐλύτη"""
                    ))
                    ma.submit_packets(self.create_event_packet(
                        'ვეპხის ტყაოსანი შოთა რუსთაველი',
                        """ღმერთსი შემვედრე, ნუთუ კვლა დამხსნას სოფლისა შრომასა, ცეცხლს, წყალსა და მიწასა, ჰაერთა თანა მრომასა; მომცნეს ფრთენი და აღვფრინდე, მივჰხვდე მას ჩემსა ნდომასა, დღისით და ღამით ვჰხედვიდე მზისა ელვათა კრთომაასა.
                        """
                    ))
                    ma.submit_packets(self.create_event_packet(
                        'Traité sur la tolérance',
                        """Ose supposer qu'un Ministre éclairé & magnanime, un Prélat humain & sage, un Prince qui sait que son intérêt consiste dans le grand nombre de ses Sujets, & sa gloire dans leur bonheur, daigne jetter les yeux sur cet Ecrit informe & défectueux; il y supplée par ses propres lumieres; il se dit à lui-même: Que risquerai-je à voir la terre cultivée & ornée par plus de mains laborieuses, les tributs augmentés, l'Etat plus florissant?"""
                    ))

            ma.flush()
Example 23
    def test_counter_normalization(self):
        ag_interval = 10
        stats = MetricsBucketAggregator('myhost', interval=ag_interval)

        # Assert counters are normalized.
        stats.submit_packets('int:1|c')
        stats.submit_packets('int:4|c')
        stats.submit_packets('int:15|c')

        stats.submit_packets('float:5|c')

        self.sleep_for_interval_length(ag_interval)
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 2

        floatc, intc = metrics

        assert floatc['metric'] == 'float'
        assert floatc['points'][0][1] == 0.5
        assert floatc['host'] == 'myhost'

        assert intc['metric'] == 'int'
        assert intc['points'][0][1] == 2
        assert intc['host'] == 'myhost'
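Counters are normalized to per-second rates at flush time: the bucket total divided by the interval. The expectations above in one sketch:

def normalize_counter(total, interval):
    return total / float(interval)

assert normalize_counter(1 + 4 + 15, 10) == 2.0  # the 'int' series
assert normalize_counter(5, 10) == 0.5           # the 'float' series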
Example 24
    def test_histogram_normalization(self):
        ag_interval = 10
        # The min is not enabled by default
        stats = MetricsBucketAggregator(
            'myhost',
            interval=ag_interval,
            histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES + ['min'])
        for i in range(5):
            stats.submit_packets('h1:1|h')
        for i in range(20):
            stats.submit_packets('h2:1|h')

        self.sleep_for_interval_length(ag_interval)
        metrics = self.sort_metrics(stats.flush())
        _, _, h1count, _, _, _, _, _, h2count, _, _, _ = metrics

        assert h1count['points'][0][1] == 0.5
        assert h2count['points'][0][1] == 2
Example 25
    def test_empty_counter(self):
        ag_interval = self.interval
        stats = MetricsBucketAggregator('myhost', interval=ag_interval)

        self.sleep_for_interval_length(ag_interval)
        metrics = self.sort_metrics(stats.flush())
        # Should be an empty list
        assert len(metrics) == 0

        # Track some counters.
        stats.submit_packets('my.first.counter:%s|c' % (1 * ag_interval))
        # Call flush before the bucket_length has been exceeded
        metrics = self.sort_metrics(stats.flush())
        # Should be an empty list
        assert len(metrics) == 0

        self.sleep_for_interval_length(ag_interval)
        metrics = self.sort_metrics(stats.flush())
        # Should now have the data
        assert len(metrics) == 1
        assert metrics[0]['metric'] == 'my.first.counter'
        assert metrics[0]['points'][0][1] == 1
Example 26
def init(server_host,
         port,
         timeout=UDP_SOCKET_TIMEOUT,
         aggregator_interval=DOGSTATSD_AGGREGATOR_BUCKET_SIZE):
    """Configure the server and the reporting thread.
    """

    log.debug("Configuring dogstatsd")

    hostname = None

    aggregator = MetricsBucketAggregator(
        hostname,
        aggregator_interval,
        recent_point_threshold=None,
        formatter=None,
        histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES,
        histogram_percentiles=DEFAULT_HISTOGRAM_PERCENTILES,
        utf8_decoding=True,
    )

    server = Server(aggregator, server_host, port, timeout=timeout)

    return server
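A hedged usage sketch for this minimal init() variant (host and port values are illustrative; Server.start() is assumed to run the receive loop until stopped):

server = init('localhost', 8125)
server.start()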
Example 27
    def test_histogram(self):
        ag_interval = self.interval
        # The min is not enabled by default
        stats = MetricsBucketAggregator(
            'myhost',
            interval=ag_interval,
            histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES + ['min'])
        self.wait_for_bucket_boundary(ag_interval)

        # Sample every number in 0-99 many times, so each percentile
        # should land close to its own rank.
        percentiles = range(100)
        random.shuffle(percentiles)  # in place
        for i in percentiles:
            for j in xrange(20):
                for type_ in ['h', 'ms']:
                    m = 'my.p:%s|%s' % (i, type_)
                    stats.submit_packets(m)

        self.sleep_for_interval_length(ag_interval)
        metrics = self.sort_metrics(stats.flush())

        assert len(metrics) == 6
        p95, pavg, pcount, pmax, pmed, pmin = self.sort_metrics(metrics)
        assert p95['metric'] == 'my.p.95percentile'
        self.assert_almost_equal(p95['points'][0][1], 95, 10)
        self.assert_almost_equal(pmax['points'][0][1], 99, 1)
        self.assert_almost_equal(pmed['points'][0][1], 50, 2)
        self.assert_almost_equal(pavg['points'][0][1], 50, 2)
        self.assert_almost_equal(pmin['points'][0][1], 1, 1)
        assert pcount['points'][0][1] == 4000  # 100 * 20 * 2
        assert p95['host'] == 'myhost'

        # Ensure that histograms are reset.
        metrics = self.sort_metrics(stats.flush())
        assert not metrics
Example 28
    def test_event_title(self):
        stats = MetricsBucketAggregator('myhost', interval=self.interval)
        stats.submit_packets('_e{0,4}:|text')
        stats.submit_packets('_e{9,4}:2intitulé|text')
        stats.submit_packets('_e{14,4}:3title content|text')
        stats.submit_packets('_e{14,4}:4title|content|text')
        stats.submit_packets(
            '_e{13,4}:5title\\ntitle|text')  # \n stays escaped

        events = self.sort_events(stats.flush_events())

        assert len(events) == 5
        first, second, third, fourth, fifth = events

        assert first['msg_title'] == ''
        assert second['msg_title'] == '2intitulé'
        assert third['msg_title'] == '3title content'
        assert fourth['msg_title'] == '4title|content'
        assert fifth['msg_title'] == '5title\\ntitle'
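The _e{m,n}: header carries the title and text lengths, which is how '|' can appear inside either field. A minimal decoding sketch (illustrative; the real parser counts bytes and validates the header):

def parse_event_packet(packet):
    # '_e{<title_len>,<text_len>}:<title>|<text>[|optional|fields]'
    header, _, body = packet.partition(':')
    title_len, text_len = map(int, header[3:-1].split(','))
    title = body[:title_len]
    text = body[title_len + 1:title_len + 1 + text_len]
    return title, text

assert parse_event_packet('_e{14,4}:4title|content|text') == ('4title|content', 'text')
assert parse_event_packet('_e{0,4}:|text') == ('', 'text')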
Example 29
    def test_dogstatsd_aggregation_perf(self):
        ma = MetricsBucketAggregator('my.host')

        for _ in xrange(self.FLUSH_COUNT):
            for i in xrange(self.LOOPS_PER_FLUSH):
                for j in xrange(self.METRIC_COUNT):

                    # metrics
                    ma.submit_packets('counter.%s:%s|c' % (j, i))
                    ma.submit_packets('gauge.%s:%s|g' % (j, i))
                    ma.submit_packets('histogram.%s:%s|h' % (j, i))
                    ma.submit_packets('set.%s:%s|s' % (j, 1.0))

                    # tagged metrics
                    ma.submit_packets('counter.%s:%s|c|#tag1,tag2' % (j, i))
                    ma.submit_packets('gauge.%s:%s|g|#tag1,tag2' % (j, i))
                    ma.submit_packets('histogram.%s:%s|h|#tag1,tag2' % (j, i))
                    ma.submit_packets('set.%s:%s|s|#tag1,tag2' % (j, i))

                    # sampled metrics
                    ma.submit_packets('counter.%s:%s|c|@0.5' % (j, i))
                    ma.submit_packets('gauge.%s:%s|g|@0.5' % (j, i))
                    ma.submit_packets('histogram.%s:%s|h|@0.5' % (j, i))
                    ma.submit_packets('set.%s:%s|s|@0.5' % (j, i))

            ma.flush()
Example 30
    def test_event_text(self):
        stats = MetricsBucketAggregator('myhost', interval=self.interval)
        stats.submit_packets('_e{2,0}:t1|')
        stats.submit_packets('_e{2,12}:t2|text|content')
        stats.submit_packets(
            '_e{2,23}:t3|First line\\nSecond line')  # \n is a newline
        stats.submit_packets(
            '_e{2,19}:t4|♬ †øU †øU ¥ºu T0µ ♪')  # utf-8 compliant

        events = self.sort_events(stats.flush_events())

        assert len(events) == 4
        first, second, third, fourth = events

        assert first['msg_text'] == ''
        assert second['msg_text'] == 'text|content'
        assert third['msg_text'] == 'First line\nSecond line'
        assert fourth['msg_text'] == '♬ †øU †øU ¥ºu T0µ ♪'
Example 31
    def test_recent_point_threshold(self):
        ag_interval = 1
        threshold = 100
        # The min is not enabled by default
        stats = MetricsBucketAggregator(
            'myhost',
            recent_point_threshold=threshold,
            interval=ag_interval,
            histogram_aggregates=DEFAULT_HISTOGRAM_AGGREGATES + ['min'])
        timestamp_beyond_threshold = time.time() - threshold * 2

        # Ensure that old gauges get dropped due to old timestamps
        stats.submit_metric('my.first.gauge', 5, 'g')
        stats.submit_metric('my.first.gauge',
                            1,
                            'g',
                            timestamp=timestamp_beyond_threshold)
        stats.submit_metric('my.second.gauge',
                            20,
                            'g',
                            timestamp=timestamp_beyond_threshold)

        self.sleep_for_interval_length(ag_interval)
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 1

        first = metrics[0]
        assert first['metric'] == 'my.first.gauge'
        assert first['points'][0][1] == 5
        assert first['host'] == 'myhost'

        timestamp_within_threshold = time.time() - threshold / 2
        bucket_for_timestamp_within_threshold = timestamp_within_threshold - (
            timestamp_within_threshold % ag_interval)
        stats.submit_metric('my.1.gauge', 5, 'g')
        stats.submit_metric('my.1.gauge',
                            1,
                            'g',
                            timestamp=timestamp_within_threshold)
        stats.submit_metric('my.2.counter',
                            20,
                            'c',
                            timestamp=timestamp_within_threshold)
        stats.submit_metric('my.3.set',
                            20,
                            's',
                            timestamp=timestamp_within_threshold)
        stats.submit_metric('my.4.histogram',
                            20,
                            'h',
                            timestamp=timestamp_within_threshold)

        self.sleep_for_interval_length(ag_interval)
        flush_timestamp = time.time()
        # The bucket timestamp is the beginning of the bucket that ended before we flushed
        bucket_timestamp = flush_timestamp - (flush_timestamp %
                                              ag_interval) - ag_interval
        metrics = self.sort_metrics(stats.flush())
        assert len(metrics) == 11

        first, first_b, second, second_b, third, h1, h2, h3, h4, h5, h6 = metrics
        assert first['metric'] == 'my.1.gauge'
        assert first['points'][0][1] == 1
        assert first['host'] == 'myhost'
        self.assert_almost_equal(first['points'][0][0],
                                 bucket_for_timestamp_within_threshold, 0.1)
        assert first_b['metric'] == 'my.1.gauge'
        assert first_b['points'][0][1] == 5
        self.assert_almost_equal(first_b['points'][0][0], bucket_timestamp,
                                 0.1)

        assert second['metric'] == 'my.2.counter'
        assert second['points'][0][1] == 20
        self.assert_almost_equal(second['points'][0][0],
                                 bucket_for_timestamp_within_threshold, 0.1)
        assert second_b['metric'] == 'my.2.counter'
        assert second_b['points'][0][1] == 0
        self.assert_almost_equal(second_b['points'][0][0], bucket_timestamp,
                                 0.1)

        assert third['metric'] == 'my.3.set'
        assert third['points'][0][1] == 1
        self.assert_almost_equal(third['points'][0][0],
                                 bucket_for_timestamp_within_threshold, 0.1)

        assert h1['metric'] == 'my.4.histogram.95percentile'
        assert h1['points'][0][1] == 20
        self.assert_almost_equal(h1['points'][0][0],
                                 bucket_for_timestamp_within_threshold, 0.1)
        assert h1['points'][0][0] == h2['points'][0][0]
        assert h1['points'][0][0] == h3['points'][0][0]
        assert h1['points'][0][0] == h4['points'][0][0]
        assert h1['points'][0][0] == h5['points'][0][0]
Example 32
    def test_sdstatsd_aggregation_perf(self):
        ma = MetricsBucketAggregator('my.host')

        for _ in xrange(self.FLUSH_COUNT):
            for i in xrange(self.LOOPS_PER_FLUSH):
                for j in xrange(self.METRIC_COUNT):

                    # metrics
                    ma.submit_packets('counter.%s:%s|c' % (j, i))
                    ma.submit_packets('gauge.%s:%s|g' % (j, i))
                    ma.submit_packets('histogram.%s:%s|h' % (j, i))
                    ma.submit_packets('set.%s:%s|s' % (j, 1.0))

                    # tagged metrics
                    ma.submit_packets('counter.%s:%s|c|#tag1,tag2' % (j, i))
                    ma.submit_packets('gauge.%s:%s|g|#tag1,tag2' % (j, i))
                    ma.submit_packets('histogram.%s:%s|h|#tag1,tag2' % (j, i))
                    ma.submit_packets('set.%s:%s|s|#tag1,tag2' % (j, i))

                    # sampled metrics
                    ma.submit_packets('counter.%s:%s|c|@0.5' % (j, i))
                    ma.submit_packets('gauge.%s:%s|g|@0.5' % (j, i))
                    ma.submit_packets('histogram.%s:%s|h|@0.5' % (j, i))
                    ma.submit_packets('set.%s:%s|s|@0.5' % (j, i))

            ma.flush()
Example 33
    def test_event_tags(self):
        stats = MetricsBucketAggregator('myhost', interval=self.interval)
        stats.submit_packets('_e{6,4}:title1|text')
        stats.submit_packets('_e{6,4}:title2|text|#t1')
        stats.submit_packets('_e{6,4}:title3|text|#t1,t2:v2,t3,t4')
        stats.submit_packets('_e{6,4}:title4|text|k:key|p:normal|#t1,t2')

        events = self.sort_events(stats.flush_events())

        assert len(events) == 4
        first, second, third, fourth = events

        try:
            first['tags']
        except KeyError:
            assert True
        else:
            assert False, "event['tags'] shouldn't be defined when no tags are specified in the packet"
        assert first['msg_title'] == 'title1'
        assert first['msg_text'] == 'text'

        assert second['msg_title'] == 'title2'
        assert second['msg_text'] == 'text'
        assert second['tags'] == sorted(['t1'])

        assert third['msg_title'] == 'title3'
        assert third['msg_text'] == 'text'
        assert third['tags'] == sorted(['t1', 't2:v2', 't3', 't4'])

        assert fourth['msg_title'] == 'title4'
        assert fourth['msg_text'] == 'text'
        assert fourth['aggregation_key'] == 'key'
        assert fourth['priority'] == 'normal'
        assert fourth['tags'] == sorted(['t1', 't2'])