def test_api_request_size_dynamic(mock_flush, caplog, elasticapm_client):
    elasticapm_client.config.update(version="1", api_request_size="100b")
    transport = Transport(client=elasticapm_client, queue_chill_count=1)
    transport.start_thread()
    try:
        with caplog.at_level("DEBUG", "elasticapm.transport"):
            # we need to add lots of uncompressible data to fill up the gzip-internal buffer
            for i in range(12):
                transport.queue("error", "".join(random.choice(string.ascii_letters) for i in range(2000)))
            transport._flushed.wait(timeout=0.1)
        assert mock_flush.call_count == 1
        elasticapm_client.config.update(version="1", api_request_size="1mb")
        with caplog.at_level("DEBUG", "elasticapm.transport"):
            # we need to add lots of uncompressible data to fill up the gzip-internal buffer
            for i in range(12):
                transport.queue("error", "".join(random.choice(string.ascii_letters) for i in range(2000)))
            transport._flushed.wait(timeout=0.1)
        # Should be unchanged because our buffer limit is much higher.
        assert mock_flush.call_count == 1
    finally:
        transport.close()


def test_forced_flush(mock_send, caplog):
    with caplog.at_level("DEBUG", "elasticapm.transport"):
        transport = Transport(metadata={}, max_buffer_size=1000, compress_level=0)
        transport.queue("error", "x", flush=True)
    record = caplog.records[0]
    assert "forced" in record.message
    assert mock_send.call_count == 1
    assert transport._queued_data is None


def test_flush_time(mock_send, caplog):
    with caplog.at_level("DEBUG", "elasticapm.transport"):
        transport = Transport(metadata={}, max_flush_time=0.1)
        # let first run finish
        time.sleep(0.2)
        transport.close()
    record = caplog.records[0]
    assert "0.1" in record.message
    assert mock_send.call_count == 0


def test_flush_time(mock_send, caplog):
    transport = Transport(metadata={}, max_flush_time=5)
    transport._last_flush = timeit.default_timer() - 5.1
    with caplog.at_level("DEBUG", "elasticapm.transport"):
        transport.queue("error", {})
    record = caplog.records[0]
    assert "5.1" in record.message
    assert mock_send.call_count == 1
    assert transport._queued_data is None


def test_flush_time_size(mock_send, caplog):
    with caplog.at_level("DEBUG", "elasticapm.transport"):
        transport = Transport(metadata={}, max_buffer_size=100)
        # we need to add lots of uncompressible data to fill up the gzip-internal buffer
        for i in range(9):
            transport.queue("error", "".join(random.choice(string.ascii_letters) for i in range(2000)))
    record = caplog.records[1]
    assert "queue size" in record.message
    assert mock_send.call_count == 1
    assert transport._queued_data is None


def test_metadata_prepended(mock_send):
    transport = Transport(metadata={"x": "y"}, max_flush_time=5, compress_level=0)
    transport.queue("error", {}, flush=True)
    assert mock_send.call_count == 1
    args, kwargs = mock_send.call_args
    if compat.PY2:
        data = gzip.GzipFile(fileobj=compat.StringIO(args[0])).read()
    else:
        data = gzip.decompress(args[0])
    data = data.decode("utf-8").split("\n")
    assert "metadata" in data[0]


def test_empty_queue_flush_is_not_sent(mock_send, elasticapm_client):
    transport = Transport(client=elasticapm_client)
    try:
        transport.start_thread()
        transport.flush()
        assert mock_send.call_count == 0
    finally:
        transport.close()


def test_empty_queue_flush_is_not_sent(mock_send):
    transport = Transport(client=None, metadata={"x": "y"}, max_flush_time=5)
    try:
        transport.start_thread()
        transport.flush()
        assert mock_send.call_count == 0
    finally:
        transport.close()


def test_forced_flush(mock_send, caplog, elasticapm_client):
    transport = Transport(client=elasticapm_client, compress_level=0)
    transport.start_thread()
    try:
        with caplog.at_level("DEBUG", "elasticapm.transport"):
            transport.queue("error", "x", flush=True)
    finally:
        transport.close()
    assert mock_send.call_count == 1
    assert transport._queued_data is None


def test_flush_time_size(mock_flush, caplog, elasticapm_client):
    transport = Transport(client=elasticapm_client, queue_chill_count=1)
    transport.start_thread()
    try:
        with caplog.at_level("DEBUG", "elasticapm.transport"):
            # we need to add lots of uncompressible data to fill up the gzip-internal buffer
            for i in range(12):
                transport.queue("error", "".join(random.choice(string.ascii_letters) for i in range(2000)))
            transport._flushed.wait(timeout=0.1)
        assert mock_flush.call_count == 1
    finally:
        transport.close()


def test_metadata_prepended(mock_send, elasticapm_client):
    transport = Transport(client=elasticapm_client, compress_level=0)
    transport.start_thread()
    transport.queue("error", {}, flush=True)
    transport.close()
    assert mock_send.call_count == 1
    args, kwargs = mock_send.call_args
    if compat.PY2:
        data = gzip.GzipFile(fileobj=compat.StringIO(args[0])).read()
    else:
        data = gzip.decompress(args[0])
    data = data.decode("utf-8").split("\n")
    assert "metadata" in data[0]


def test_transport_metadata_pid_change(mock_send, elasticapm_client):
    transport = Transport(client=elasticapm_client)
    assert not transport._metadata
    transport.start_thread()
    time.sleep(0.2)
    assert transport._metadata
    transport.close()


def test_flush_time(mock_send, caplog):
    with caplog.at_level("DEBUG", "elasticapm.transport"):
        transport = Transport(client=None, metadata={}, max_flush_time=0.1)
        transport.start_thread()
        # let first run finish
        time.sleep(0.2)
        transport.close()
    record = caplog.records[0]
    assert "due to time since last flush" in record.message
    assert mock_send.call_count == 0


def test_flush_time(mock_send, caplog, elasticapm_client):
    with caplog.at_level("DEBUG", "elasticapm.transport"):
        transport = Transport(client=elasticapm_client)
        transport.start_thread()
        # let first run finish
        time.sleep(0.2)
        transport.close()
    assert_any_record_contains(caplog.records, "due to time since last flush", "elasticapm.transport")
    assert mock_send.call_count == 0


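# Hedged sketch of the assert_any_record_contains helper used by the test above and
# by test_api_request_time_dynamic below. The project's real helper may differ; its
# usage here only implies "scan the caplog records for a message substring, optionally
# restricted to a logger name", which is what this assumed version does.
def assert_any_record_contains(records, message, logger_name=None):
    for record in records:
        if logger_name is not None and record.name != logger_name:
            continue
        if message in record.getMessage():
            return
    raise AssertionError("no log record contains %r" % message)

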
def test_sync_transport_fail_and_recover(mock_send, caplog):
    mock_send.side_effect = TransportException("meh")
    transport = Transport()
    transport.queue("x", {}, flush=True)
    assert transport.state.did_fail()
    # first retry should be allowed immediately
    assert transport.state.should_try()
    # recover
    mock_send.side_effect = None
    transport.queue("x", {}, flush=True)
    assert not transport.state.did_fail()


def test_flush_time_size(mock_flush, caplog):
    transport = Transport(metadata={}, max_buffer_size=100, queue_chill_count=1)
    with caplog.at_level("DEBUG", "elasticapm.transport"):
        # we need to add lots of uncompressible data to fill up the gzip-internal buffer
        for i in range(12):
            transport.queue("error", "".join(random.choice(string.ascii_letters) for i in range(2000)))
        transport._flushed.wait(timeout=0.1)
    assert mock_flush.call_count == 1
    transport.close()


def test_sync_transport_fail_and_recover(mock_send, caplog):
    transport = Transport(client=None)
    transport.start_thread()
    try:
        mock_send.side_effect = TransportException("meh")
        transport.queue("x", {})
        transport.flush()
        assert transport.state.did_fail()
        # first retry should be allowed immediately
        assert transport.state.should_try()
        # recover
        mock_send.side_effect = None
        transport.queue("x", {})
        transport.flush()
        assert not transport.state.did_fail()
    finally:
        transport.close()


def test_empty_queue_flush_is_not_sent(mock_send):
    transport = Transport(metadata={"x": "y"}, max_flush_time=5)
    transport.flush()
    assert mock_send.call_count == 0


def test_compress_level_sanitization():
    assert Transport(compress_level=None)._compress_level == 0
    assert Transport(compress_level=-1)._compress_level == 0
    assert Transport(compress_level=10)._compress_level == 9


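# A minimal sketch (an assumption, not the library's actual code) of the clamping
# behavior that test_compress_level_sanitization above pins down: a missing compress
# level falls back to 0, and values outside gzip's valid 0-9 range are clamped.
def _sanitized_compress_level(compress_level):
    if compress_level is None:
        return 0
    return min(9, max(0, int(compress_level)))

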
def test_api_request_time_dynamic(mock_send, caplog, elasticapm_client):
    elasticapm_client.config.update(version="1", api_request_time="1s")
    with caplog.at_level("DEBUG", "elasticapm.transport"):
        transport = Transport(client=elasticapm_client)
        transport.start_thread()
        # let first run finish
        time.sleep(0.2)
        transport.close()
    assert not caplog.records
    assert mock_send.call_count == 0
    elasticapm_client.config.update(version="1", api_request_time="100ms")
    with caplog.at_level("DEBUG", "elasticapm.transport"):
        transport = Transport(client=elasticapm_client)
        transport.start_thread()
        # let first run finish
        time.sleep(0.2)
        transport.close()
    assert_any_record_contains(caplog.records, "due to time since last flush", "elasticapm.transport")
    assert mock_send.call_count == 0


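# The tests above receive mock_send / mock_flush and elasticapm_client as injected
# pytest fixtures that are not shown in this file. A rough sketch of what such
# conftest fixtures could look like follows; the patch targets and the use of
# unittest.mock here are assumptions about the test setup, not the project's actual
# conftest.
from unittest import mock

import pytest


@pytest.fixture()
def mock_send():
    # Assumed patch target: the Transport.send method whose call count the tests check.
    with mock.patch("elasticapm.transport.base.Transport.send") as mocked:
        yield mocked


@pytest.fixture()
def mock_flush():
    # Assumed patch target: the Transport.flush method used by the queue-size tests.
    with mock.patch("elasticapm.transport.base.Transport.flush") as mocked:
        yield mocked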