def test_forced_flush(mock_send, caplog):
    """A queue() call with flush=True must trigger exactly one send and empty the queue."""
    transport = Transport(metadata={}, max_buffer_size=1000, compress_level=0)
    with caplog.at_level("DEBUG", "elasticapm.transport"):
        transport.queue("error", "x", flush=True)
    transport.close()
    assert mock_send.call_count == 1
    assert transport._queued_data is None
def test_api_request_size_dynamic(mock_flush, caplog, elasticapm_client):
    """Updating api_request_size at runtime changes when the buffer flushes."""
    elasticapm_client.config.update(version="1", api_request_size="100b")
    transport = Transport(client=elasticapm_client, queue_chill_count=1)
    transport.start_thread()
    try:
        with caplog.at_level("DEBUG", "elasticapm.transport"):
            # incompressible payloads so the gzip-internal buffer fills quickly
            for _ in range(12):
                payload = "".join(random.choice(string.ascii_letters) for _ in range(2000))
                transport.queue("error", payload)
            transport._flushed.wait(timeout=0.1)
        assert mock_flush.call_count == 1
        # raise the limit far above anything we queue below
        elasticapm_client.config.update(version="1", api_request_size="1mb")
        with caplog.at_level("DEBUG", "elasticapm.transport"):
            for _ in range(12):
                payload = "".join(random.choice(string.ascii_letters) for _ in range(2000))
                transport.queue("error", payload)
            transport._flushed.wait(timeout=0.1)
        # unchanged: the new, much larger limit was never reached
        assert mock_flush.call_count == 1
    finally:
        transport.close()
def test_flush_time(mock_send, caplog):
    """Queueing after max_flush_time has elapsed forces a send and logs the queue age."""
    transport = Transport(metadata={}, max_flush_time=5)
    # pretend the last flush happened just over the 5-second limit ago
    transport._last_flush = timeit.default_timer() - 5.1
    with caplog.at_level("DEBUG", "elasticapm.transport"):
        transport.queue("error", {})
    first_record = caplog.records[0]
    assert "5.1" in first_record.message
    assert mock_send.call_count == 1
    assert transport._queued_data is None
def test_forced_flush(mock_send, caplog, elasticapm_client):
    """On a threaded transport, flush=True results in a single send."""
    transport = Transport(client=elasticapm_client, compress_level=0)
    transport.start_thread()
    try:
        with caplog.at_level("DEBUG", "elasticapm.transport"):
            transport.queue("error", "x", flush=True)
    finally:
        transport.close()
    assert mock_send.call_count == 1
    assert transport._queued_data is None
def test_flush_time_size(mock_send, caplog):
    """Exceeding max_buffer_size triggers a send and a 'queue size' debug log line."""
    with caplog.at_level("DEBUG", "elasticapm.transport"):
        transport = Transport(metadata={}, max_buffer_size=100)
        # incompressible payloads so the gzip-internal buffer fills quickly
        for _ in range(9):
            transport.queue("error", "".join(random.choice(string.ascii_letters) for _ in range(2000)))
    assert "queue size" in caplog.records[1].message
    assert mock_send.call_count == 1
    assert transport._queued_data is None
def test_metadata_prepended(mock_send):
    """The first NDJSON line of every sent request must be the metadata object."""
    transport = Transport(metadata={"x": "y"}, max_flush_time=5, compress_level=0)
    transport.queue("error", {}, flush=True)
    assert mock_send.call_count == 1
    args, kwargs = mock_send.call_args
    # the payload is gzip-compressed; decompression differs between Py2 and Py3
    if compat.PY2:
        raw = gzip.GzipFile(fileobj=compat.StringIO(args[0])).read()
    else:
        raw = gzip.decompress(args[0])
    lines = raw.decode("utf-8").split("\n")
    assert "metadata" in lines[0]
def test_flush_time_size(mock_flush, caplog, elasticapm_client):
    """Queueing past the buffer limit on a threaded transport triggers exactly one flush."""
    transport = Transport(client=elasticapm_client, queue_chill_count=1)
    transport.start_thread()
    try:
        with caplog.at_level("DEBUG", "elasticapm.transport"):
            # incompressible payloads so the gzip-internal buffer fills quickly
            for _ in range(12):
                payload = "".join(random.choice(string.ascii_letters) for _ in range(2000))
                transport.queue("error", payload)
            transport._flushed.wait(timeout=0.1)
        assert mock_flush.call_count == 1
    finally:
        transport.close()
def test_sync_transport_fail_and_recover(mock_send, caplog):
    """A failed send marks the transport state as failed; a successful send clears it."""
    mock_send.side_effect = TransportException("meh")
    transport = Transport()
    transport.queue("x", {}, flush=True)
    assert transport.state.did_fail()
    # the first retry is allowed immediately, without a backoff delay
    assert transport.state.should_try()
    # let the next send succeed again
    mock_send.side_effect = None
    transport.queue("x", {}, flush=True)
    assert not transport.state.did_fail()
def test_metadata_prepended(mock_send, elasticapm_client):
    """The first NDJSON line of every sent request must be the metadata object.

    Fix: the transport worker thread is now shut down in a ``finally`` block
    so it cannot leak if ``queue()`` raises — consistent with the other
    threaded tests in this file.
    """
    transport = Transport(client=elasticapm_client, compress_level=0)
    transport.start_thread()
    try:
        transport.queue("error", {}, flush=True)
    finally:
        transport.close()
    assert mock_send.call_count == 1
    args, kwargs = mock_send.call_args
    # the payload is gzip-compressed; decompression differs between Py2 and Py3
    if compat.PY2:
        data = gzip.GzipFile(fileobj=compat.StringIO(args[0])).read()
    else:
        data = gzip.decompress(args[0])
    data = data.decode("utf-8").split("\n")
    assert "metadata" in data[0]
def test_flush_time_size(mock_flush, caplog):
    """Exceeding max_buffer_size on a threaded transport triggers exactly one flush.

    Fix: ``close()`` is moved into a ``finally`` block so the worker thread is
    always shut down, even when the assertion fails — previously a failing
    assert skipped ``close()`` and leaked the thread. This also matches the
    other threaded tests in this file.
    """
    transport = Transport(metadata={}, max_buffer_size=100, queue_chill_count=1)
    try:
        with caplog.at_level("DEBUG", "elasticapm.transport"):
            # incompressible payloads so the gzip-internal buffer fills quickly
            for _ in range(12):
                transport.queue("error", "".join(random.choice(string.ascii_letters) for _ in range(2000)))
            transport._flushed.wait(timeout=0.1)
        assert mock_flush.call_count == 1
    finally:
        transport.close()
def test_sync_transport_fail_and_recover(mock_send, caplog):
    """did_fail() flips on a failed send and clears again after a successful one."""
    transport = Transport(client=None)
    transport.start_thread()
    try:
        mock_send.side_effect = TransportException("meh")
        transport.queue("x", {})
        transport.flush()
        assert transport.state.did_fail()
        # the first retry is allowed immediately, without a backoff delay
        assert transport.state.should_try()
        # a successful send resets the failure state
        mock_send.side_effect = None
        transport.queue("x", {})
        transport.flush()
        assert not transport.state.did_fail()
    finally:
        transport.close()