def test_udp_and_tcp_collectors_combine_and_flush_to_stdout(self):
    """Metrics collected over UDP and TCP merge into a single stdout flush."""
    udp_port = randint(8125, 8999)
    tcp_port = randint(8125, 9999)
    flush_interval = 2
    self.app_process = self.create_server_process(
        udp_port=udp_port, tcp_port=tcp_port, flush_interval=flush_interval)
    udp_sender = Client("localhost", udp_port)
    tcp_sender = TCPClient("localhost", tcp_port)
    # two increments per transport -> the "event" counter should total 4
    for _ in range(2):
        udp_sender.increment("event")
        tcp_sender.increment("event")
    # three "process" timers split across transports, one "query" timer
    udp_sender.timing("process", 8.5)
    udp_sender.timing("process", 9.8)
    tcp_sender.timing("process", 8.7)
    tcp_sender.timing("query", 2)
    # give the server time to complete at least one flush cycle
    sleep(flush_interval)
    self.app_process.terminate()
    wait_until_server_shuts_down(self.app_process)
    flushed_metrics = self.app_process.communicate()[0].splitlines()
    self.assertGreater(len(flushed_metrics), 9,
                       'flushed 1 counter and at least 2 x 4 timers')
    self.maxDiff = None
    expected_summary = {
        "event": 4,
        "process.count": 3,
        "process.max": 9.8,
        "process.min": 8.5,
        "process.mean": 9.0,
        "process.median": 8.7,
        "query.count": 1,
        "query.max": 2.0,
        "query.min": 2.0,
        "query.mean": 2.0,
        "query.median": 2.0
    }
    self.assertDictEqual(expected_summary, metrics_to_dict(flushed_metrics))
def test_increment(self):
    """Counters are serialized to StatsD wire format and pushed over TCP."""
    client = TCPClient("localhost")
    # swap the real socket for a mock so sendall() calls can be inspected
    client._socket = self.mock_socket
    client.increment("event")
    expected_simple = "event:1|c\n".encode()
    self.mock_sendall.assert_called_with(expected_simple)
    # spaces in metric names are normalized to underscores; explicit count
    # and sample rate are appended to the payload
    client.increment("region.event name", 2, 0.5)
    expected_sampled = "region.event_name:2|c|@0.5\n".encode()
    self.mock_sendall.assert_called_with(expected_sampled)
def test_sending_metrics(self):
    """End-to-end check that every metric type reaches the TCP server.

    First sends one metric of each kind with deterministic values and
    asserts the exact wire format; then exercises the timing helpers
    (timing_since, chronometer, wrap decorator, stopwatch) whose elapsed
    milliseconds are nondeterministic, matching them with regexes.
    """
    start = datetime.now()
    start_timestamp = time()
    client = TCPClient("localhost", self.__class__.port)
    client.increment("1.test", 5)
    client.increment("2.login")
    client.timing("3.query", 3600)
    client.gauge("4.memory", 102400)
    client.gauge_delta("5.memory", 256)
    client.gauge_delta("6.memory", -128)
    client.set("7.ip", "127.0.0.1")
    expected = [
        "1.test:5|c",
        "2.login:1|c",
        "3.query:3600|ms",
        "4.memory:102400|g",
        "5.memory:+256|g",
        "6.memory:-128|g",
        "7.ip:127.0.0.1|s",
    ]
    self.assert_server_received_expected_requests(expected)
    client.timing_since("1.query", start_timestamp)
    client.timing_since("2.other_query", start)
    chronometer = client.chronometer()
    chronometer.time_callable("3.sleepy", sleep, 1, (0.02,))

    @chronometer.wrap("4.wait_a_sec")
    def wait_a_sec():
        sleep(0.01)

    wait_a_sec()
    with client.stopwatch("5.my_with_block"):
        sleep(0.02)
    # Fix: patterns are now raw strings — "\d" in a plain literal is an
    # invalid escape sequence (SyntaxWarning on Python >= 3.12,
    # DeprecationWarning before that).
    expected_patterns = [
        r"1.query:[1-9]\d{0,4}\|ms",
        r"2.other_query:[1-9]\d{0,4}\|ms",
        r"3.sleepy:[1-9]\d{0,4}\|ms",
        r"4.wait_a_sec:[1-9]\d{0,4}\|ms",
        r"5.my_with_block:[1-9]\d{0,4}\|ms",
    ]
    self.assert_server_received_expected_request_regex(expected_patterns)
def test_sending_metrics(self):
    """End-to-end check that every metric type reaches the TCP server.

    Deterministic metrics (counters, timers, gauges, gauge deltas, sets)
    are asserted against their exact wire format; the timing helpers
    (timing_since, chronometer, wrap decorator, stopwatch) produce
    nondeterministic durations and are matched with regexes instead.
    """
    start = datetime.now()
    start_timestamp = time()
    client = TCPClient("localhost", self.__class__.port)
    client.increment("1.test", 5)
    client.increment("2.login")
    client.timing("3.query", 3600)
    client.gauge("4.memory", 102400)
    client.gauge_delta("5.memory", 256)
    client.gauge_delta("6.memory", -128)
    client.set("7.ip", "127.0.0.1")
    expected = [
        "1.test:5|c",
        "2.login:1|c",
        "3.query:3600|ms",
        "4.memory:102400|g",
        "5.memory:+256|g",
        "6.memory:-128|g",
        "7.ip:127.0.0.1|s",
    ]
    self.assert_server_received_expected_requests(expected)
    client.timing_since("1.query", start_timestamp)
    client.timing_since("2.other_query", start)
    chronometer = client.chronometer()
    chronometer.time_callable("3.sleepy", sleep, 1, (0.02,))

    @chronometer.wrap("4.wait_a_sec")
    def wait_a_sec():
        sleep(0.01)

    wait_a_sec()
    with client.stopwatch("5.my_with_block"):
        sleep(0.02)
    # Fix: patterns are now raw strings — "\d" in a plain literal is an
    # invalid escape sequence (SyntaxWarning on Python >= 3.12,
    # DeprecationWarning before that).
    expected_patterns = [
        r"1.query:[1-9]\d{0,4}\|ms",
        r"2.other_query:[1-9]\d{0,4}\|ms",
        r"3.sleepy:[1-9]\d{0,4}\|ms",
        r"4.wait_a_sec:[1-9]\d{0,4}\|ms",
        r"5.my_with_block:[1-9]\d{0,4}\|ms",
    ]
    self.assert_server_received_expected_request_regex(expected_patterns)
def test_udp_and_tcp_collectors_combine_and_flush_to_stdout(self):
    """Metrics collected over UDP and TCP merge into a single stdout flush."""
    udp_port = randint(8125, 8999)
    tcp_port = randint(8125, 9999)
    flush_interval = 2
    self.app_process = self.create_server_process(
        udp_port=udp_port, tcp_port=tcp_port, flush_interval=flush_interval)
    udp_sender = Client("localhost", udp_port)
    tcp_sender = TCPClient("localhost", tcp_port)
    # two increments per transport -> the "event" counter should total 4
    for _ in range(2):
        udp_sender.increment("event")
        tcp_sender.increment("event")
    # three "process" timers split across transports, one "query" timer
    udp_sender.timing("process", 85)
    udp_sender.timing("process", 98)
    tcp_sender.timing("process", 87)
    tcp_sender.timing("query", 2)
    # give the server time to complete at least one flush cycle
    sleep(flush_interval)
    self.app_process.terminate()
    wait_until_server_shuts_down(self.app_process)
    flushed_metrics = self.app_process.communicate()[0].splitlines()
    self.assertGreater(len(flushed_metrics), 9,
                       'flushed 1 counter and at least 2 x 4 timers')
    self.maxDiff = None
    expected_summary = {
        "event": 4,
        "process.count": 3,
        "process.max": 98,
        "process.min": 85,
        "process.mean": 90,
        "process.median": 87,
        "query.count": 1,
        "query.max": 2.0,
        "query.min": 2.0,
        "query.mean": 2.0,
        "query.median": 2.0
    }
    self.assertDictEqual(expected_summary, metrics_to_dict(flushed_metrics))
def test_reload_server_keeps_the_queue(self):
    """SIGHUP reloads the config, but metrics queued before reload survive.

    Phase 1 collects over TCP, phase 2 rewrites the config to UDP-only and
    reloads; the final flush must combine metrics from both phases.
    """
    tcp_port = randint(8125, 9999)
    _, config_filename = mkstemp(text=True)
    self.remove_files.append(config_filename)
    # NOTE(review): config string newlines reconstructed from a
    # whitespace-collapsed original — verify exact layout against VCS.
    write_to_file(
        config_filename,
        """
[navdoon]
log-level=DEBUG
log-stderr=true
flush-stdout=true
collect-tcp=127.0.0.1:{}
flush-interval=60
""".format(tcp_port)
    )
    self.app_process = self.create_server_process(config=config_filename)
    tcp_sender = TCPClient("localhost", tcp_port)
    for _ in range(2):
        tcp_sender.increment("event")
    tcp_sender.timing("query", 2)
    del tcp_sender
    wait_until_server_processed_metric(self.app_process, 'event')
    udp_port = randint(8125, 8999)
    flush_interval = 5
    # Rewrite the config: the reloaded server should collect on UDP only.
    write_to_file(
        config_filename,
        """
[navdoon]
log-level=DEBUG
log-stderr=true
flush-stdout=true
collect-udp=127.0.0.1:{}
flush-interval={}
""".format(udp_port, flush_interval)
    )
    self.app_process.send_signal(signal.SIGHUP)
    wait_until_server_starts_collecting(self.app_process, 10)
    udp_sender = Client("localhost", udp_port)
    for _ in range(2):
        udp_sender.increment("event")
    udp_sender.timing("query", 4)
    udp_sender.increment("finish")
    # TCP collector should be down after the reload
    self.assertRaises(Exception, TCPClient, "localhost", tcp_port)
    os.remove(config_filename)
    wait_until_server_processed_metric(self.app_process, 'finish')
    # wait for at least one flush cycle
    sleep(flush_interval)
    self.app_process.terminate()
    wait_until_server_shuts_down(self.app_process)
    flushed_metrics = self.app_process.communicate()[0].splitlines()
    self.assertGreaterEqual(len(flushed_metrics), 5)
    self.maxDiff = None
    expected_summary = {
        "event": 4,
        "query.count": 2,
        "query.max": 4.0,
        "query.min": 2.0,
        "query.mean": 3.0,
        "query.median": 3.0,
        "finish": 1
    }
    self.assertDictEqual(expected_summary, metrics_to_dict(flushed_metrics))
def test_reload_server_keeps_the_queue(self):
    """SIGHUP reloads the config, but metrics queued before reload survive.

    Phase 1 collects over TCP, phase 2 rewrites the config to UDP-only and
    reloads; the final flush must combine metrics from both phases.
    """
    tcp_port = randint(8125, 9999)
    _, config_filename = mkstemp(text=True)
    self.remove_files.append(config_filename)
    # NOTE(review): config string newlines reconstructed from a
    # whitespace-collapsed original — verify exact layout against VCS.
    write_to_file(
        config_filename,
        """
[navdoon]
log-level=DEBUG
log-stderr=true
flush-stdout=true
collect-tcp=127.0.0.1:{}
flush-interval=60
""".format(tcp_port)
    )
    self.app_process = self.create_server_process(config=config_filename)
    tcp_sender = TCPClient("localhost", tcp_port)
    for _ in range(2):
        tcp_sender.increment("event")
    tcp_sender.timing("query", 2)
    del tcp_sender
    wait_until_server_processed_metric(self.app_process, 'event')
    udp_port = randint(8125, 8999)
    flush_interval = 5
    # Rewrite the config: the reloaded server should collect on UDP only.
    write_to_file(
        config_filename,
        """
[navdoon]
log-level=DEBUG
log-stderr=true
flush-stdout=true
collect-udp=127.0.0.1:{}
flush-interval={}
""".format(udp_port, flush_interval)
    )
    self.app_process.send_signal(signal.SIGHUP)
    wait_until_server_starts_collecting(self.app_process, 10)
    udp_sender = Client("localhost", udp_port)
    for _ in range(2):
        udp_sender.increment("event")
    udp_sender.timing("query", 4)
    udp_sender.increment("finish")
    # TCP collector should be down after the reload
    self.assertRaises(Exception, TCPClient, "localhost", tcp_port)
    os.remove(config_filename)
    wait_until_server_processed_metric(self.app_process, 'finish')
    # wait for at least one flush cycle
    sleep(flush_interval)
    self.app_process.terminate()
    wait_until_server_shuts_down(self.app_process)
    flushed_metrics = self.app_process.communicate()[0].splitlines()
    self.assertGreaterEqual(len(flushed_metrics), 5)
    self.maxDiff = None
    expected_summary = {
        "event": 4,
        "query.count": 2,
        "query.max": 4.0,
        "query.min": 2.0,
        "query.mean": 3.0,
        "query.median": 3.0,
        "finish": 1
    }
    self.assertDictEqual(expected_summary, metrics_to_dict(flushed_metrics))