def test_formatter(self):
    """Verify the `statsd_metric_namespace` option on the formatter.

    Three cases: a plain namespace prefixes flushed metric names with a
    joining dot, a namespace already ending in '.' yields exactly one dot,
    and a None namespace leaves metric names untouched.
    """
    # Plain namespace: expect "<namespace>.<metric>".
    stats = MetricsAggregator(
        'myhost',
        interval=10,
        formatter=get_formatter({"statsd_metric_namespace": "datadog"}),
    )
    stats.submit_packets('gauge:16|c|#tag3,tag4')
    metrics = self.sort_metrics(stats.flush())
    # assertEqual gives the actual values on failure, unlike assertTrue(a == b).
    self.assertEqual(len(metrics), 1)
    self.assertEqual(metrics[0]['metric'], "datadog.gauge")

    # Namespace with a trailing dot: still a single dot in the result.
    stats = MetricsAggregator(
        'myhost',
        interval=10,
        formatter=get_formatter({"statsd_metric_namespace": "datadoge."}),
    )
    stats.submit_packets('gauge:16|c|#tag3,tag4')
    metrics = self.sort_metrics(stats.flush())
    self.assertEqual(len(metrics), 1)
    self.assertEqual(metrics[0]['metric'], "datadoge.gauge")

    # No namespace configured: metric name passes through unchanged.
    stats = MetricsAggregator(
        'myhost',
        interval=10,
        formatter=get_formatter({"statsd_metric_namespace": None}),
    )
    stats.submit_packets('gauge:16|c|#tag3,tag4')
    metrics = self.sort_metrics(stats.flush())
    self.assertEqual(len(metrics), 1)
    self.assertEqual(metrics[0]['metric'], "gauge")
def init(config_path=None, use_watchdog=False, use_forwarder=False, args=None):
    """Configure the server and the reporting thread.

    Loads configuration from *config_path*, exits early (status 0) when
    dogstatsd is disabled, then wires a MetricsBucketAggregator to a
    Reporter thread and a UDP Server.

    Returns:
        (reporter, server, config) tuple.
    """
    conf = get_config(parse_args=False, cfg_path=config_path)

    # Only honour the kill switch on start/restart or a bare invocation.
    starting = not args or args[0] in ['start', 'restart']
    if not conf['use_dogstatsd'] and starting:
        log.info("Dogstatsd is disabled. Exiting")
        # We're exiting purposefully, so exit with zero (supervisor's
        # expected code). HACK: sleep a little bit so supervisor thinks
        # we've started cleanly and thus can exit cleanly.
        sleep(4)
        sys.exit(0)

    hostname = get_hostname(conf)

    port = conf['dogstatsd_port']
    api_key = conf['api_key']
    non_local_traffic = conf['non_local_traffic']
    server_host = conf['bind_host']
    forward_to_host = conf.get('statsd_forward_host')
    forward_to_port = conf.get('statsd_forward_port')
    event_chunk_size = conf.get('event_chunk_size')
    recent_point_threshold = conf.get('recent_point_threshold', None)

    # When running behind the forwarder, report to it instead of dd_url.
    target = conf['dogstatsd_target'] if use_forwarder else conf['dd_url']

    flush_interval = DOGSTATSD_FLUSH_INTERVAL
    assert 0 < flush_interval

    # The aggregator is the meeting point between the server thread (which
    # feeds it packets) and the reporting thread (which flushes it).
    aggregator = MetricsBucketAggregator(
        hostname,
        DOGSTATSD_AGGREGATOR_BUCKET_SIZE,
        recent_point_threshold=recent_point_threshold,
        formatter=get_formatter(conf),
        histogram_aggregates=conf.get('histogram_aggregates'),
        histogram_percentiles=conf.get('histogram_percentiles'),
        utf8_decoding=conf['utf8_decoding'],
    )

    # Reporting thread: flushes the aggregator to `target` every interval.
    reporter = Reporter(flush_interval, aggregator, target, api_key,
                        use_watchdog, event_chunk_size)

    # NOTICE: when `non_local_traffic` is passed we need to bind to any
    # interface on the box. The forwarder uses Tornado, which takes care of
    # socket creation itself, so '' is enough there. Dogstatsd uses a
    # single, fullstack socket, where '' doesn't work, so we default to
    # '0.0.0.0'. To bind to the IPv6 '::' meta address instead, turn off
    # `non_local_traffic` and set `bind_host` to '::'.
    if non_local_traffic:
        server_host = '0.0.0.0'

    server = Server(aggregator, server_host, port,
                    forward_to_host=forward_to_host,
                    forward_to_port=forward_to_port)

    return reporter, server, conf
def init5(agent_config=None, use_watchdog=False, use_forwarder=False, args=None):
    """Configure the server and the reporting thread.

    Unlike the config-path variants, this takes an already-parsed
    *agent_config* mapping and returns only (reporter, server).
    """
    cfg = agent_config

    # Only honour the kill switch on start/restart or a bare invocation.
    starting = not args or args[0] in ['start', 'restart']
    if not cfg['use_dogstatsd'] and starting:
        log.info("StsStatsd is disabled. Exiting")
        # We're exiting purposefully, so exit with zero (supervisor's
        # expected code). HACK: sleep a little bit so supervisor thinks
        # we've started cleanly and thus can exit cleanly.
        sleep(4)
        sys.exit(0)

    port = cfg['dogstatsd_port']
    api_key = cfg['api_key']
    non_local_traffic = cfg['non_local_traffic']
    server_host = cfg['bind_host']
    forward_to_host = cfg.get('statsd_forward_host')
    forward_to_port = cfg.get('statsd_forward_port')
    event_chunk_size = cfg.get('event_chunk_size')
    recent_point_threshold = cfg.get('recent_point_threshold', None)
    so_rcvbuf = cfg.get('statsd_so_rcvbuf', None)

    # When running behind the forwarder, report to it instead of dd_url.
    target = cfg['dogstatsd_target'] if use_forwarder else cfg['dd_url']

    hostname = get_hostname(cfg)
    log.debug("Using hostname \"%s\"", hostname)

    flush_interval = DOGSTATSD_FLUSH_INTERVAL
    assert 0 < flush_interval

    # The aggregator is the meeting point between the server thread (which
    # feeds it packets) and the reporting thread (which flushes it).
    aggregator = MetricsBucketAggregator(
        hostname,
        DOGSTATSD_AGGREGATOR_BUCKET_SIZE,
        recent_point_threshold=recent_point_threshold,
        formatter=get_formatter(cfg),
        histogram_aggregates=cfg.get('histogram_aggregates'),
        histogram_percentiles=cfg.get('histogram_percentiles'),
        utf8_decoding=cfg['utf8_decoding'],
    )

    # Reporting thread: flushes the aggregator to `target` every interval.
    reporter = Reporter(flush_interval, aggregator, target, api_key,
                        use_watchdog, event_chunk_size)

    # NOTICE: when `non_local_traffic` is passed we need to bind to any
    # interface on the box. The forwarder uses Tornado, which takes care of
    # socket creation itself, so '' is enough there. Dogstatsd uses a
    # single, fullstack socket, where '' doesn't work, so we default to
    # '0.0.0.0'. To bind to the IPv6 '::' meta address instead, turn off
    # `non_local_traffic` and set `bind_host` to '::'.
    if non_local_traffic:
        server_host = '0.0.0.0'

    server = Server(aggregator, server_host, port,
                    forward_to_host=forward_to_host,
                    forward_to_port=forward_to_port,
                    so_rcvbuf=so_rcvbuf)

    return reporter, server
def init(config_path=None, use_watchdog=False, use_forwarder=False, args=None):
    """Configure the server and the reporting thread.

    Loads the config from *config_path*, builds the dogstatsd
    aggregator/reporter/server trio and returns (reporter, server, c).
    Exits with status 0 when dogstatsd is disabled.
    """
    c = get_config(parse_args=False, cfg_path=config_path)
    if not c['use_dogstatsd'] and \
            (args and args[0] in ['start', 'restart'] or not args):
        log.info("Dogstatsd is disabled. Exiting")
        # We're exiting purposefully, so exit with zero (supervisor's expected
        # code). HACK: Sleep a little bit so supervisor thinks we've started
        # cleanly and thus can exit cleanly.
        sleep(4)
        sys.exit(0)

    # Fixed log-message typo: "Configurating" -> "Configuring" (matches the
    # spelling used by the other init variants).
    log.debug("Configuring dogstatsd")

    port = c['dogstatsd_port']
    interval = DOGSTATSD_FLUSH_INTERVAL
    api_key = c['api_key']
    aggregator_interval = DOGSTATSD_AGGREGATOR_BUCKET_SIZE
    non_local_traffic = c['non_local_traffic']
    forward_to_host = c.get('statsd_forward_host')
    forward_to_port = c.get('statsd_forward_port')
    event_chunk_size = c.get('event_chunk_size')
    recent_point_threshold = c.get('recent_point_threshold', None)

    target = c['dd_url']
    if use_forwarder:
        # When running behind the forwarder, report to it instead of dd_url.
        target = c['dogstatsd_target']

    hostname = get_hostname(c)

    # Create the aggregator (which is the point of communication between the
    # server and reporting threads).
    assert 0 < interval
    aggregator = MetricsBucketAggregator(
        hostname,
        aggregator_interval,
        recent_point_threshold=recent_point_threshold,
        formatter=get_formatter(c))

    # Start the reporting thread.
    reporter = Reporter(interval, aggregator, target, api_key, use_watchdog,
                        event_chunk_size)

    # Start the server on an IPv4 stack
    # Default to loopback
    server_host = c['bind_host']

    # If specified, bind to all addresses
    if non_local_traffic:
        server_host = ''

    server = Server(aggregator, server_host, port,
                    forward_to_host=forward_to_host,
                    forward_to_port=forward_to_port)

    return reporter, server, c
def init(config_path=None, use_watchdog=False, use_forwarder=False, args=None):
    """Configure the server and the reporting thread.

    Returns a (reporter, server, config) tuple, or exits with status 0
    when dogstatsd is disabled in the configuration.
    """
    c = get_config(parse_args=False, cfg_path=config_path)

    disabled = not c['use_dogstatsd']
    start_like = not args or args[0] in ['start', 'restart']
    if disabled and start_like:
        log.info("Dogstatsd is disabled. Exiting")
        # Exit purposefully with zero (supervisor's expected code).
        # HACK: sleep a little so supervisor believes we started cleanly
        # and can therefore exit cleanly.
        sleep(4)
        sys.exit(0)

    log.debug("Configuring dogstatsd")

    port = c['dogstatsd_port']
    api_key = c['api_key']
    non_local_traffic = c['non_local_traffic']
    forward_to_host = c.get('statsd_forward_host')
    forward_to_port = c.get('statsd_forward_port')
    event_chunk_size = c.get('event_chunk_size')
    recent_point_threshold = c.get('recent_point_threshold', None)

    # When running behind the forwarder, report to it instead of dd_url.
    target = c['dogstatsd_target'] if use_forwarder else c['dd_url']

    hostname = get_hostname(c)

    flush_interval = DOGSTATSD_FLUSH_INTERVAL
    assert 0 < flush_interval

    # The aggregator is the meeting point between the server thread (which
    # feeds it packets) and the reporting thread (which flushes it).
    aggregator = MetricsBucketAggregator(
        hostname,
        DOGSTATSD_AGGREGATOR_BUCKET_SIZE,
        recent_point_threshold=recent_point_threshold,
        formatter=get_formatter(c),
        histogram_aggregates=c.get('histogram_aggregates'),
        histogram_percentiles=c.get('histogram_percentiles'),
        utf8_decoding=c['utf8_decoding'],
    )

    # Reporting thread: flushes the aggregator to `target` every interval.
    reporter = Reporter(flush_interval, aggregator, target, api_key,
                        use_watchdog, event_chunk_size)

    # Server binds to `bind_host` (loopback by default) on an IPv4 stack;
    # with non_local_traffic it binds to all addresses instead.
    server_host = '' if non_local_traffic else c['bind_host']

    server = Server(aggregator, server_host, port,
                    forward_to_host=forward_to_host,
                    forward_to_port=forward_to_port)

    return reporter, server, c
def init(config_path=None, use_watchmonitor=False, use_forwarder=False, args=None):
    """Configure the monitorstatsd server and the reporting thread.

    Loads the config from *config_path*, builds the
    aggregator/reporter/server trio and returns (reporter, server, c).
    Exits with status 0 when monitorstatsd is disabled.
    """
    c = get_config(parse_args=False, cfg_path=config_path)
    if (not c['use_monitorstatsd'] and
            (args and args[0] in ['start', 'restart'] or not args)):
        log.info("Monitorstatsd is disabled. Exiting")
        # We're exiting purposefully, so exit with zero (supervisor's expected
        # code). HACK: sleep a little so supervisor thinks we've started
        # cleanly and thus can exit cleanly.
        sleep(4)
        sys.exit(0)

    log.debug("Configuring monitorstatsd")

    port = c['monitorstatsd_port']
    interval = monitorSTATSD_FLUSH_INTERVAL
    api_key = c['api_key']
    aggregator_interval = monitorSTATSD_AGGREGATOR_BUCKET_SIZE
    non_local_traffic = c['non_local_traffic']
    forward_to_host = c.get('statsd_forward_host')
    forward_to_port = c.get('statsd_forward_port')
    event_chunk_size = c.get('event_chunk_size')
    recent_point_threshold = c.get('recent_point_threshold', None)
    # Removed unused local: `ip = c.get('ip', "unknown")` was assigned but
    # never read anywhere in this function.

    target = c['m_url']
    if use_forwarder:
        # When running behind the forwarder, report to it instead of m_url.
        target = c['monitorstatsd_target']

    hostname = get_hostname(c)

    # Create the aggregator (the point of communication between the server
    # and reporting threads).
    assert 0 < interval
    aggregator = MetricsBucketAggregator(
        hostname,
        aggregator_interval,
        recent_point_threshold=recent_point_threshold,
        formatter=get_formatter(c),
        histogram_aggregates=c.get('histogram_aggregates'),
        histogram_percentiles=c.get('histogram_percentiles'),
        utf8_decoding=c['utf8_decoding'])

    # Start the reporting thread.
    reporter = Reporter(c, interval, aggregator, target, api_key,
                        use_watchmonitor, event_chunk_size)

    # Default to `bind_host`; bind to all addresses for non-local traffic.
    server_host = c['bind_host']
    if non_local_traffic:
        server_host = ''

    server = Server(aggregator, server_host, port,
                    forward_to_host=forward_to_host,
                    forward_to_port=forward_to_port)

    return reporter, server, c
def test_formatter(self):
    """Verify metric-name namespacing driven by `statsd_metric_namespace`.

    Covers a plain namespace (dot-joined prefix), a namespace already
    ending in '.', and the None/no-namespace passthrough case.
    """
    # Plain namespace: expect "<namespace>.<metric>".
    stats = MetricsAggregator(
        'myhost',
        interval=10,
        formatter=get_formatter({"statsd_metric_namespace": "datadog"}))
    stats.submit_packets('gauge:16|c|#tag3,tag4')
    metrics = self.sort_metrics(stats.flush())
    # assertEqual reports the mismatching values, unlike assertTrue(a == b).
    self.assertEqual(len(metrics), 1)
    self.assertEqual(metrics[0]['metric'], "datadog.gauge")

    # Namespace with a trailing dot: still a single dot in the result.
    stats = MetricsAggregator(
        'myhost',
        interval=10,
        formatter=get_formatter({"statsd_metric_namespace": "datadoge."}))
    stats.submit_packets('gauge:16|c|#tag3,tag4')
    metrics = self.sort_metrics(stats.flush())
    self.assertEqual(len(metrics), 1)
    self.assertEqual(metrics[0]['metric'], "datadoge.gauge")

    # No namespace configured: metric name passes through unchanged.
    stats = MetricsAggregator(
        'myhost',
        interval=10,
        formatter=get_formatter({"statsd_metric_namespace": None}))
    stats.submit_packets('gauge:16|c|#tag3,tag4')
    metrics = self.sort_metrics(stats.flush())
    self.assertEqual(len(metrics), 1)
    self.assertEqual(metrics[0]['metric'], "gauge")