class SinglePartitionPublisher(
    Publisher,
    ConnectionReinitializer[PublishRequest, PublishResponse],
    BatchTester[PubSubMessage],
):
    _initial: InitialPublishRequest
    _batching_settings: BatchSettings
    _connection: RetryingConnection[PublishRequest, PublishResponse]
    _batcher: SerialBatcher[PubSubMessage, Cursor]
    _outstanding_writes: List[List[WorkItem[PubSubMessage, Cursor]]]
    _receiver: Optional[asyncio.Future]
    _flusher: Optional[asyncio.Future]

    def __init__(
        self,
        initial: InitialPublishRequest,
        batching_settings: BatchSettings,
        factory: ConnectionFactory[PublishRequest, PublishResponse],
    ):
        self._initial = initial
        self._batching_settings = batching_settings
        self._connection = RetryingConnection(factory, self)
        self._batcher = SerialBatcher(self)
        self._outstanding_writes = []
        self._receiver = None
        self._flusher = None

    @property
    def _partition(self) -> Partition:
        return Partition(self._initial.partition)

    async def __aenter__(self):
        await self._connection.__aenter__()
        return self

    def _start_loopers(self):
        assert self._receiver is None
        assert self._flusher is None
        self._receiver = asyncio.ensure_future(self._receive_loop())
        self._flusher = asyncio.ensure_future(self._flush_loop())

    async def _stop_loopers(self):
        if self._receiver:
            self._receiver.cancel()
            await wait_ignore_errors(self._receiver)
            self._receiver = None
        if self._flusher:
            self._flusher.cancel()
            await wait_ignore_errors(self._flusher)
            self._flusher = None

    def _handle_response(self, response: PublishResponse):
        if "message_response" not in response:
            self._connection.fail(
                FailedPrecondition(
                    "Received an invalid subsequent response on the publish stream."
                )
            )
        if not self._outstanding_writes:
            self._connection.fail(
                FailedPrecondition(
                    "Received a publish response on the stream with no outstanding publishes."
                )
            )
        next_offset: Cursor = response.message_response.start_cursor.offset
        batch: List[WorkItem[PubSubMessage, Cursor]] = self._outstanding_writes.pop(0)
        # Offsets within a batch are contiguous, starting at the returned start cursor.
        for item in batch:
            item.response_future.set_result(Cursor(offset=next_offset))
            next_offset += 1

    async def _receive_loop(self):
        while True:
            response = await self._connection.read()
            self._handle_response(response)

    async def _flush_loop(self):
        while True:
            await asyncio.sleep(self._batching_settings.max_latency)
            await self._flush()

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        if self._connection.error():
            self._fail_if_retrying_failed()
        else:
            await self._flush()
        await self._stop_loopers()
        await self._connection.__aexit__(exc_type, exc_val, exc_tb)

    def _fail_if_retrying_failed(self):
        if self._connection.error():
            for batch in self._outstanding_writes:
                for item in batch:
                    item.response_future.set_exception(self._connection.error())

    async def _flush(self):
        batch = self._batcher.flush()
        if not batch:
            return
        self._outstanding_writes.append(batch)
        # Aggregate the whole batch into a single MessagePublishRequest on the stream.
        aggregate = PublishRequest()
        aggregate.message_publish_request.messages = [item.request for item in batch]
        try:
            await self._connection.write(aggregate)
        except GoogleAPICallError as e:
            _LOGGER.debug(f"Failed publish on stream: {e}")
            self._fail_if_retrying_failed()

    async def publish(self, message: PubSubMessage) -> MessageMetadata:
        cursor_future = self._batcher.add(message)
        if self._batcher.should_flush():
            await self._flush()
        return MessageMetadata(self._partition, await cursor_future)

    async def reinitialize(
        self, connection: Connection[PublishRequest, PublishResponse]
    ):
        await self._stop_loopers()
        await connection.write(PublishRequest(initial_request=self._initial))
        response = await connection.read()
        if "initial_response" not in response:
            self._connection.fail(
                FailedPrecondition(
                    "Received an invalid initial response on the publish stream."
                )
            )
        # Resend every batch that was still outstanding when the stream was reset.
        for batch in self._outstanding_writes:
            aggregate = PublishRequest()
            aggregate.message_publish_request.messages = [
                item.request for item in batch
            ]
            await connection.write(aggregate)
        self._start_loopers()

    def test(self, requests: Iterable[PubSubMessage]) -> bool:
        request_count = 0
        byte_count = 0
        for req in requests:
            request_count += 1
            byte_count += PubSubMessage.pb(req).ByteSize()
        # Flush once the pending batch reaches the per-request message or byte limit.
        return (request_count >= _MAX_MESSAGES) or (byte_count >= _MAX_BYTES)
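# Usage sketch (illustrative, not part of the original module): how a
# SinglePartitionPublisher is typically driven as an async context manager.
# The topic path, partition, and batch limits below are assumed values, and
# `factory` is expected to be a ConnectionFactory[PublishRequest, PublishResponse]
# constructed elsewhere.
async def _example_publish(
    factory: ConnectionFactory[PublishRequest, PublishResponse],
) -> MessageMetadata:
    initial = InitialPublishRequest(
        topic="projects/example/locations/us-central1-a/topics/example-topic",
        partition=0,
    )
    settings = BatchSettings(
        max_bytes=1024 * 1024, max_messages=100, max_latency=0.05
    )
    async with SinglePartitionPublisher(initial, settings, factory) as publisher:
        # publish() batches the message and resolves to MessageMetadata carrying
        # the partition and the offset assigned by the server.
        return await publisher.publish(PubSubMessage(data=b"hello"))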
class CommitterImpl(
    Committer,
    ConnectionReinitializer[
        StreamingCommitCursorRequest, StreamingCommitCursorResponse
    ],
):
    _initial: InitialCommitCursorRequest
    _flush_seconds: float
    _connection: RetryingConnection[
        StreamingCommitCursorRequest, StreamingCommitCursorResponse
    ]
    _batcher: SerialBatcher[Cursor, None]
    _outstanding_commits: List[List[WorkItem[Cursor, None]]]
    _receiver: Optional[asyncio.Future]
    _flusher: Optional[asyncio.Future]
    _empty: asyncio.Event

    def __init__(
        self,
        initial: InitialCommitCursorRequest,
        flush_seconds: float,
        factory: ConnectionFactory[
            StreamingCommitCursorRequest, StreamingCommitCursorResponse
        ],
    ):
        self._initial = initial
        self._flush_seconds = flush_seconds
        self._connection = RetryingConnection(factory, self)
        self._batcher = SerialBatcher()
        self._outstanding_commits = []
        self._receiver = None
        self._flusher = None
        self._empty = asyncio.Event()
        self._empty.set()

    async def __aenter__(self):
        await self._connection.__aenter__()
        return self

    def _start_loopers(self):
        assert self._receiver is None
        assert self._flusher is None
        self._receiver = asyncio.ensure_future(self._receive_loop())
        self._flusher = asyncio.ensure_future(self._flush_loop())

    async def _stop_loopers(self):
        if self._receiver:
            self._receiver.cancel()
            await wait_ignore_errors(self._receiver)
            self._receiver = None
        if self._flusher:
            self._flusher.cancel()
            await wait_ignore_errors(self._flusher)
            self._flusher = None

    def _handle_response(self, response: StreamingCommitCursorResponse):
        if "commit" not in response:
            self._connection.fail(
                FailedPrecondition(
                    "Received an invalid subsequent response on the commit stream."
                )
            )
        if response.commit.acknowledged_commits > len(self._outstanding_commits):
            self._connection.fail(
                FailedPrecondition(
                    "Received a commit response on the stream with no outstanding commits."
                )
            )
        # Each acknowledged commit resolves the oldest outstanding batch, in order.
        for _ in range(response.commit.acknowledged_commits):
            batch = self._outstanding_commits.pop(0)
            for item in batch:
                item.response_future.set_result(None)
        if len(self._outstanding_commits) == 0:
            self._empty.set()

    async def _receive_loop(self):
        while True:
            response = await self._connection.read()
            self._handle_response(response)

    async def _flush_loop(self):
        while True:
            await asyncio.sleep(self._flush_seconds)
            await self._flush()

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self._stop_loopers()
        if self._connection.error():
            self._fail_if_retrying_failed()
        else:
            await self._flush()
        await self._connection.__aexit__(exc_type, exc_val, exc_tb)

    def _fail_if_retrying_failed(self):
        if self._connection.error():
            for batch in self._outstanding_commits:
                for item in batch:
                    item.response_future.set_exception(self._connection.error())

    async def _flush(self):
        batch = self._batcher.flush()
        if not batch:
            return
        self._outstanding_commits.append(batch)
        self._empty.clear()
        # Only the last cursor in the batch needs to be sent; it subsumes the earlier ones.
        req = StreamingCommitCursorRequest()
        req.commit.cursor = batch[-1].request
        try:
            await self._connection.write(req)
        except GoogleAPICallError as e:
            _LOGGER.debug(f"Failed commit on stream: {e}")
            self._fail_if_retrying_failed()

    async def wait_until_empty(self):
        await self._flush()
        await self._connection.await_unless_failed(self._empty.wait())

    async def commit(self, cursor: Cursor) -> None:
        future = self._batcher.add(cursor)
        await future

    async def reinitialize(
        self,
        connection: Connection[
            StreamingCommitCursorRequest, StreamingCommitCursorResponse
        ],
        last_error: Optional[GoogleAPICallError],
    ):
        await self._stop_loopers()
        await connection.write(StreamingCommitCursorRequest(initial=self._initial))
        response = await connection.read()
        if "initial" not in response:
            self._connection.fail(
                FailedPrecondition(
                    "Received an invalid initial response on the commit stream."
                )
            )
        if self._outstanding_commits:
            # Roll up outstanding commits into a single batch; committing the
            # last cursor covers all of them.
            rollup: List[WorkItem[Cursor, None]] = []
            for batch in self._outstanding_commits:
                for item in batch:
                    rollup.append(item)
            self._outstanding_commits = [rollup]
            req = StreamingCommitCursorRequest()
            req.commit.cursor = rollup[-1].request
            await connection.write(req)
        self._start_loopers()
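# Usage sketch (illustrative, not part of the original module): committing a
# cursor through CommitterImpl and waiting for the stream to drain. The
# subscription path, partition, offset, and flush interval are assumed values;
# `factory` is expected to be a ConnectionFactory for the streaming commit
# cursor RPC constructed elsewhere.
async def _example_commit(
    factory: ConnectionFactory[
        StreamingCommitCursorRequest, StreamingCommitCursorResponse
    ],
) -> None:
    initial = InitialCommitCursorRequest(
        subscription="projects/example/locations/us-central1-a/subscriptions/example-sub",
        partition=0,
    )
    async with CommitterImpl(initial, 0.1, factory) as committer:
        # commit() resolves once the flush loop sends the cursor and the server
        # acknowledges it; wait_until_empty() flushes and waits for any
        # remaining outstanding commits.
        await committer.commit(Cursor(offset=42))
        await committer.wait_until_empty()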