async def test_read_error_fails():
    """A stream error is sticky: it resurfaces on every later read and write."""
    connection = GapicConnection[int, int]()
    connection.set_response_it(async_iterable([InternalServerError("abc")]))
    # Both reads raise the buffered error, not just the first one.
    for _ in range(2):
        with pytest.raises(InternalServerError):
            await connection.read()
    # A write after the failure raises the same error as well.
    with pytest.raises(InternalServerError):
        await connection.write(3)
def test_iteration_w_raw_raising_non_retryable_internal_error_after_token(self):
    """A non-retryable InternalServerError raised after a resume token
    propagates to the caller with no restart attempt."""
    from google.api_core.exceptions import InternalServerError

    first_batch = (
        self._make_item(0),
        self._make_item(1, resume_token=RESUME_TOKEN),
    )
    second_batch = (self._make_item(2), self._make_item(3))
    failing_iterator = _MockIterator(
        *first_batch, fail_after=True, error=InternalServerError("testing")
    )
    replacement_iterator = _MockIterator(*second_batch)
    restart = mock.Mock(spec=[], side_effect=[failing_iterator, replacement_iterator])
    resumable = self._call_fut(restart)
    # The error is not retryable, so iteration blows up ...
    with self.assertRaises(InternalServerError):
        list(resumable)
    # ... and the stream was only ever opened once (no resume attempt).
    self.assertEqual(restart.mock_calls, [mock.call()])
    self.assertNoSpans()
async def test_reinitialize_after_retryable(
    retrying_connection: Connection[int, int],
    reinitializer,
    default_connection,
    asyncio_sleep,
):
    """A retryable error on the first reinitialize attempt backs off once,
    retries, and then the connection works normally."""
    queues = wire_queues(reinitializer.reinitialize)
    default_connection.read.return_value = 1
    # First reinitialize attempt fails retryably; the second succeeds.
    await queues.results.put(InternalServerError("abc"))
    await queues.results.put(None)
    async with retrying_connection as _:
        # Exactly one backoff sleep between the two attempts.
        asyncio_sleep.assert_called_once_with(_MIN_BACKOFF_SECS)
        assert reinitializer.reinitialize.call_count == 2
        assert await retrying_connection.read() == 1
        # re-call to read once first completes
        assert default_connection.read.call_count == 2
def test_iteration_w_raw_raising_retryable_internal_error_after_token(self):
    """A retryable InternalServerError after a resume token triggers a
    restart from that token, and the full item sequence is still yielded."""
    from google.api_core.exceptions import InternalServerError

    first_batch = (
        self._make_item(0),
        self._make_item(1, resume_token=RESUME_TOKEN),
    )
    second_batch = (self._make_item(2), self._make_item(3))
    failing_iterator = _MockIterator(
        *first_batch,
        fail_after=True,
        error=InternalServerError(
            "Received unexpected EOS on DATA frame from server"
        )
    )
    replacement_iterator = _MockIterator(*second_batch)
    restart = mock.Mock(spec=[], side_effect=[failing_iterator, replacement_iterator])
    resumable = self._call_fut(restart)
    # All items from both halves of the stream come through ...
    self.assertEqual(list(resumable), list(first_batch + second_batch))
    # ... via a second open that resumes at the recorded token.
    self.assertEqual(
        restart.mock_calls, [mock.call(), mock.call(resume_token=RESUME_TOKEN)]
    )
    self.assertNoSpans()
def test_insert_rows_sets_metric_on_failure(self):
    """Each failed insert attempt records its error code as a metric; the
    final successful attempt records "ok"."""
    MetricsEnvironment.process_wide_container().reset()
    client = mock.Mock()
    # Fail a few times, then succeed.
    client.insert_rows_json = mock.Mock(
        side_effect=[
            DeadlineExceeded("Deadline Exceeded"),
            InternalServerError("Internal Error"),
            [],
        ])
    wrapper = beam.io.gcp.bigquery_tools.BigQueryWrapper(client)
    wrapper.insert_rows("my_project", "my_dataset", "my_table", [])
    # Expect two failing calls, then a success (i.e. two retries).
    for status in ("deadline_exceeded", "internal", "ok"):
        self.verify_write_call_metric(
            "my_project", "my_dataset", "my_table", status, 1)
def test_w_unstructured_internal_server_error(self):
    """An InternalServerError with no structured payload is classified
    truthy by the predicate under test."""
    from google.api_core.exceptions import InternalServerError

    self.assertTrue(self._call_fut(InternalServerError("testing")))
async def test_publishes_retried_on_restart(
    committer: Committer,
    default_connection,
    initial_request,
    asyncio_sleep,
    sleep_queues,
):
    """After a retryable stream error, the committer reconnects and re-sends
    only the aggregated outstanding commit; one response completes both
    pending commit futures.

    NOTE(review): statement order here mirrors the exact queue handshake the
    committer performs; do not reorder steps.
    """
    sleep_called = sleep_queues[FLUSH_SECONDS].called
    sleep_results = sleep_queues[FLUSH_SECONDS].results
    cursor1 = Cursor(offset=321)
    cursor2 = Cursor(offset=1)
    # Pair each connection write/read with a queue so the test can step the
    # committer's stream interactions one at a time.
    write_called_queue = asyncio.Queue()
    write_result_queue = asyncio.Queue()
    default_connection.write.side_effect = make_queue_waiter(
        write_called_queue, write_result_queue)
    read_called_queue = asyncio.Queue()
    read_result_queue = asyncio.Queue()
    default_connection.read.side_effect = make_queue_waiter(
        read_called_queue, read_result_queue)
    # Pre-load the initial-stream handshake responses.
    read_result_queue.put_nowait(StreamingCommitCursorResponse(initial={}))
    write_result_queue.put_nowait(None)
    async with committer:
        # Set up connection
        await write_called_queue.get()
        await read_called_queue.get()
        default_connection.write.assert_has_calls([call(initial_request)])

        # Write message 1
        commit_fut1 = asyncio.ensure_future(committer.commit(cursor1))
        assert not commit_fut1.done()

        # Wait for writes to be waiting
        await sleep_called.get()
        asyncio_sleep.assert_called_with(FLUSH_SECONDS)

        # Handle the connection write
        await sleep_results.put(None)
        await write_called_queue.get()
        await write_result_queue.put(None)
        default_connection.write.assert_has_calls(
            [call(initial_request), call(as_request(cursor1))])
        # Commit is not resolved until the server responds.
        assert not commit_fut1.done()

        # Wait for writes to be waiting
        await sleep_called.get()
        asyncio_sleep.assert_has_calls(
            [call(FLUSH_SECONDS), call(FLUSH_SECONDS)])

        # Write message 2
        commit_fut2 = asyncio.ensure_future(committer.commit(cursor2))
        assert not commit_fut2.done()

        # Handle the connection write
        await sleep_results.put(None)
        await write_called_queue.get()
        await write_result_queue.put(None)
        default_connection.write.assert_has_calls([
            call(initial_request),
            call(as_request(cursor1)),
            call(as_request(cursor2)),
        ])
        assert not commit_fut1.done()
        assert not commit_fut2.done()

        # Fail the connection with a retryable error
        await read_called_queue.get()
        await read_result_queue.put(InternalServerError("retryable"))
        await sleep_queues[_MIN_BACKOFF_SECS].called.get()
        await sleep_queues[_MIN_BACKOFF_SECS].results.put(None)
        # Reinitialization
        await write_called_queue.get()
        await write_result_queue.put(None)
        await read_called_queue.get()
        await read_result_queue.put(StreamingCommitCursorResponse(initial={}))
        # Re-sending messages on the new stream
        await write_called_queue.get()
        await write_result_queue.put(None)
        asyncio_sleep.assert_has_calls([
            call(FLUSH_SECONDS),
            call(FLUSH_SECONDS),
            call(FLUSH_SECONDS),
            call(_MIN_BACKOFF_SECS),
        ])
        default_connection.write.assert_has_calls([
            # Aggregates response calls on second pass
            call(initial_request),
            call(as_request(cursor2)),
        ])
        # Sending the response for the one commit finishes both
        await read_called_queue.get()
        await read_result_queue.put(as_response(count=1))
        await commit_fut1
        await commit_fut2
async def test_message_receipt(
    subscriber: Subscriber,
    default_connection,
    base_initial_subscribe,
    initial_request,
    asyncio_sleep,
    sleep_queues,
):
    """Messages are delivered in order; after a retryable stream error the
    subscriber reconnects, seeking past the last delivered cursor and
    re-sending only the still-unused flow-control tokens.

    NOTE(review): statement order mirrors the subscriber's exact stream
    handshake; do not reorder steps.
    """
    # Pair each connection write/read with a queue so the test can step the
    # subscriber's stream interactions one at a time.
    write_called_queue = asyncio.Queue()
    write_result_queue = asyncio.Queue()
    flow = FlowControlRequest(allowed_messages=100, allowed_bytes=100)
    message_1 = SequencedMessage(cursor=Cursor(offset=3), size_bytes=5)
    message_2 = SequencedMessage(cursor=Cursor(offset=5), size_bytes=10)
    default_connection.write.side_effect = make_queue_waiter(
        write_called_queue, write_result_queue
    )
    read_called_queue = asyncio.Queue()
    read_result_queue = asyncio.Queue()
    default_connection.read.side_effect = make_queue_waiter(
        read_called_queue, read_result_queue
    )
    # Pre-load the initial-stream handshake responses.
    read_result_queue.put_nowait(SubscribeResponse(initial={}))
    write_result_queue.put_nowait(None)
    async with subscriber:
        # Set up connection
        await write_called_queue.get()
        await read_called_queue.get()
        default_connection.write.assert_has_calls([call(initial_request)])

        # Send tokens.
        flow_fut = asyncio.ensure_future(subscriber.allow_flow(flow))
        assert not flow_fut.done()

        # Handle the inline write since initial tokens are 100% of outstanding.
        await write_called_queue.get()
        await write_result_queue.put(None)
        await flow_fut
        default_connection.write.assert_has_calls(
            [call(initial_request), call(as_request(flow))]
        )

        message1_fut = asyncio.ensure_future(subscriber.read())

        # Send messages to the subscriber.
        await read_result_queue.put(as_response([message_1, message_2]))
        # Wait for the next read call
        await read_called_queue.get()

        # Both batched messages are surfaced in order.
        assert (await message1_fut) == message_1
        assert (await subscriber.read()) == message_2

        # Fail the connection with a retryable error
        await read_called_queue.get()
        await read_result_queue.put(InternalServerError("retryable"))
        await sleep_queues[_MIN_BACKOFF_SECS].called.get()
        await sleep_queues[_MIN_BACKOFF_SECS].results.put(None)
        # Reinitialization
        await write_called_queue.get()
        # The new stream must start one past the last delivered offset.
        seek_to_cursor_request = make_initial_subscribe_request(
            base_initial_subscribe,
            SeekRequest(cursor=Cursor(offset=message_2.cursor.offset + 1)),
        )
        default_connection.write.assert_has_calls(
            [
                call(initial_request),
                call(as_request(flow)),
                call(seek_to_cursor_request),
            ]
        )
        await write_result_queue.put(None)
        await read_called_queue.get()
        await read_result_queue.put(SubscribeResponse(initial={}))
        # Re-sending flow tokens on the new stream.
        await write_called_queue.get()
        await write_result_queue.put(None)
        # Tokens for the two delivered messages (2 messages / 15 bytes) are
        # deducted from the original 100/100 grant before re-sending.
        default_connection.write.assert_has_calls(
            [
                call(initial_request),
                call(as_request(flow)),
                call(seek_to_cursor_request),
                call(
                    as_request(
                        FlowControlRequest(allowed_messages=98, allowed_bytes=85)
                    )
                ),
            ]
        )
async def test_flow_resent_on_restart(
    subscriber: Subscriber,
    default_connection,
    initial_request,
    asyncio_sleep,
    sleep_queues,
):
    """Flow-control tokens granted across several allow_flow calls are
    aggregated into a single request when the stream restarts.

    NOTE(review): statement order mirrors the subscriber's exact stream
    handshake; do not reorder steps.
    """
    # Pair each connection write/read with a queue so the test can step the
    # subscriber's stream interactions one at a time.
    write_called_queue = asyncio.Queue()
    write_result_queue = asyncio.Queue()
    flow_1 = FlowControlRequest(allowed_messages=100, allowed_bytes=100)
    flow_2 = FlowControlRequest(allowed_messages=5, allowed_bytes=10)
    flow_3 = FlowControlRequest(allowed_messages=10, allowed_bytes=5)
    default_connection.write.side_effect = make_queue_waiter(
        write_called_queue, write_result_queue
    )
    read_called_queue = asyncio.Queue()
    read_result_queue = asyncio.Queue()
    default_connection.read.side_effect = make_queue_waiter(
        read_called_queue, read_result_queue
    )
    # Pre-load the initial-stream handshake responses.
    read_result_queue.put_nowait(SubscribeResponse(initial={}))
    write_result_queue.put_nowait(None)
    async with subscriber:
        # Set up connection
        await write_called_queue.get()
        await read_called_queue.get()
        default_connection.write.assert_has_calls([call(initial_request)])

        # Send tokens.
        flow_fut1 = asyncio.ensure_future(subscriber.allow_flow(flow_1))
        assert not flow_fut1.done()

        # Handle the inline write since initial tokens are 100% of outstanding.
        await write_called_queue.get()
        await write_result_queue.put(None)
        await flow_fut1
        default_connection.write.assert_has_calls(
            [call(initial_request), call(as_request(flow_1))]
        )

        # Should complete without writing to the connection
        await subscriber.allow_flow(flow_2)
        await subscriber.allow_flow(flow_3)

        # Fail the connection with a retryable error
        await read_called_queue.get()
        await read_result_queue.put(InternalServerError("retryable"))
        await sleep_queues[_MIN_BACKOFF_SECS].called.get()
        await sleep_queues[_MIN_BACKOFF_SECS].results.put(None)
        # Reinitialization
        await write_called_queue.get()
        await write_result_queue.put(None)
        await read_called_queue.get()
        await read_result_queue.put(SubscribeResponse(initial={}))
        # Re-sending flow tokens on the new stream
        await write_called_queue.get()
        await write_result_queue.put(None)
        # All three grants (100+5+10 messages, 100+10+5 bytes) collapse into
        # one aggregate token request on the fresh stream.
        default_connection.write.assert_has_calls(
            [
                call(initial_request),
                call(as_request(flow_1)),
                call(initial_request),
                call(
                    as_request(
                        FlowControlRequest(allowed_messages=115, allowed_bytes=115)
                    )
                ),
            ]
        )
async def test_publishes_retried_on_restart(
    publisher: Publisher,
    default_connection,
    initial_request,
    asyncio_sleep,
    sleep_queues,
):
    """After a retryable stream error, the publisher reconnects and re-sends
    every unacknowledged message batch on the new stream.

    NOTE(review): statement order here mirrors the exact queue handshake the
    publisher performs; do not reorder steps.
    """
    sleep_called = sleep_queues[FLUSH_SECONDS].called
    sleep_results = sleep_queues[FLUSH_SECONDS].results
    message1 = PubSubMessage(data=b"abc")
    message2 = PubSubMessage(data=b"def")
    # Pair each connection write/read with a queue so the test can step the
    # publisher's stream interactions one at a time.
    write_called_queue = asyncio.Queue()
    write_result_queue = asyncio.Queue()
    default_connection.write.side_effect = make_queue_waiter(
        write_called_queue, write_result_queue)
    read_called_queue = asyncio.Queue()
    read_result_queue = asyncio.Queue()
    default_connection.read.side_effect = make_queue_waiter(
        read_called_queue, read_result_queue)
    # Pre-load the initial-stream handshake responses.
    write_result_queue.put_nowait(None)
    read_result_queue.put_nowait(PublishResponse(initial_response={}))
    async with publisher:
        # Set up connection
        await write_called_queue.get()
        await read_called_queue.get()
        default_connection.write.assert_has_calls([call(initial_request)])

        # Write message 1
        publish_fut1 = asyncio.ensure_future(publisher.publish(message1))
        assert not publish_fut1.done()

        # Wait for writes to be waiting
        await sleep_called.get()
        asyncio_sleep.assert_called_with(FLUSH_SECONDS)

        # Handle the connection write
        await sleep_results.put(None)
        await write_called_queue.get()
        await write_result_queue.put(None)
        default_connection.write.assert_has_calls(
            [call(initial_request), call(as_publish_request([message1]))])
        # Publish is not resolved until the server responds.
        assert not publish_fut1.done()

        # Wait for writes to be waiting
        await sleep_called.get()
        asyncio_sleep.assert_has_calls(
            [call(FLUSH_SECONDS), call(FLUSH_SECONDS)])

        # Write message 2
        publish_fut2 = asyncio.ensure_future(publisher.publish(message2))
        assert not publish_fut2.done()

        # Handle the connection write
        await sleep_results.put(None)
        await write_called_queue.get()
        await write_result_queue.put(None)
        default_connection.write.assert_has_calls([
            call(initial_request),
            call(as_publish_request([message1])),
            call(as_publish_request([message2])),
        ])
        assert not publish_fut1.done()
        assert not publish_fut2.done()

        # Fail the connection with a retryable error
        await read_called_queue.get()
        await read_result_queue.put(InternalServerError("retryable"))
        await sleep_queues[_MIN_BACKOFF_SECS].called.get()
        await sleep_queues[_MIN_BACKOFF_SECS].results.put(None)
        # Reinitialization
        await write_called_queue.get()
        write_result_queue.put_nowait(None)
        await read_called_queue.get()
        read_result_queue.put_nowait(PublishResponse(initial_response={}))
        # Re-sending messages on the new stream
        await write_called_queue.get()
        await write_result_queue.put(None)
        await write_called_queue.get()
        await write_result_queue.put(None)
        asyncio_sleep.assert_has_calls([
            call(FLUSH_SECONDS),
            call(FLUSH_SECONDS),
            call(FLUSH_SECONDS),
            call(_MIN_BACKOFF_SECS),
        ])
        # Both unacked batches are replayed after the new initial request.
        default_connection.write.assert_has_calls([
            call(initial_request),
            call(as_publish_request([message1])),
            call(as_publish_request([message2])),
            call(initial_request),
            call(as_publish_request([message1])),
            call(as_publish_request([message2])),
        ])
async def test_restart(
    assigner: Assigner,
    default_connection,
    connection_factory,
    initial_request,
    asyncio_sleep,
    sleep_queues,
):
    """When the assignment stream fails on write, the assigner backs off,
    opens a fresh connection from the factory, and delivers the next
    assignment there without re-sending the pending ack.

    NOTE(review): statement order mirrors the assigner's exact stream
    handshake; do not reorder steps.
    """
    # Pair each connection write/read with a queue so the test can step the
    # assigner's stream interactions one at a time.
    write_called_queue = asyncio.Queue()
    write_result_queue = asyncio.Queue()
    default_connection.write.side_effect = make_queue_waiter(
        write_called_queue, write_result_queue)
    read_called_queue = asyncio.Queue()
    read_result_queue = asyncio.Queue()
    default_connection.read.side_effect = make_queue_waiter(
        read_called_queue, read_result_queue)
    write_result_queue.put_nowait(None)
    async with assigner:
        # Set up connection
        await write_called_queue.get()
        await read_called_queue.get()
        default_connection.write.assert_has_calls([call(initial_request)])

        # Wait for the first assignment
        assign_fut1 = asyncio.ensure_future(assigner.get_assignment())
        assert not assign_fut1.done()

        partitions = {Partition(2), Partition(7)}
        # Send the first assignment.
        await read_result_queue.put(as_response(partitions=partitions))
        await read_called_queue.get()
        assert (await assign_fut1) == partitions

        # Get the next assignment: should attempt to send an ack on the stream
        assign_fut2 = asyncio.ensure_future(assigner.get_assignment())
        await write_called_queue.get()
        default_connection.write.assert_has_calls(
            [call(initial_request), call(ack_request())])

        # Set up the next connection
        conn2 = MagicMock(spec=Connection)
        conn2.__aenter__.return_value = conn2
        connection_factory.new.return_value = conn2
        write_called_queue_2 = asyncio.Queue()
        write_result_queue_2 = asyncio.Queue()
        conn2.write.side_effect = make_queue_waiter(
            write_called_queue_2, write_result_queue_2)
        read_called_queue_2 = asyncio.Queue()
        read_result_queue_2 = asyncio.Queue()
        conn2.read.side_effect = make_queue_waiter(
            read_called_queue_2, read_result_queue_2)

        # Fail the connection by failing the write call.
        await write_result_queue.put(InternalServerError("failed"))
        await sleep_queues[_MIN_BACKOFF_SECS].called.get()
        await sleep_queues[_MIN_BACKOFF_SECS].results.put(None)

        # Reinitialize
        await write_called_queue_2.get()
        write_result_queue_2.put_nowait(None)
        conn2.write.assert_has_calls([call(initial_request)])

        partitions = {Partition(5)}
        # Send the second assignment on the new connection.
        await read_called_queue_2.get()
        await read_result_queue_2.put(as_response(partitions=partitions))
        assert (await assign_fut2) == partitions

        # No ack call ever made.
        conn2.write.assert_has_calls([call(initial_request)])