def stream_update(self, stream_name: str, token: str, data: Any):
    """Forward a newly-available stream update to clients.

    Interest-filtering is the receiver's job; here we just wrap the
    update in an RDATA command tagged with our instance name and send it.
    """
    cmd = RdataCommand(stream_name, self._instance_name, token, data)
    self.send_command(cmd)
def test_backwards_stream_id(self):
    """
    Test that RDATA that comes after the current position should be discarded.
    """
    # Disconnect so changes can stack up without being replicated yet.
    self.disconnect()

    # Inject an event via inject_event, so it is not sent out over
    # replication until self.replicate() is called.
    event = self._inject_test_event()

    # Sanity check: nothing should have been received yet.
    self.assertEqual([], self.test_handler.received_rdata_rows)

    # Reconnect and pull the queued updates through.
    self.reconnect()
    self.replicate()

    # Keep only the events rows, ignoring the various cache-invalidation
    # updates that also come through.
    events_rows = [
        r for r in self.test_handler.received_rdata_rows if r[0] == "events"
    ]

    # Exactly one row should have arrived.
    self.assertEqual(len(events_rows), 1)
    stream_name, token, row = events_rows[0]
    self.assertEqual("events", stream_name)
    self.assertIsInstance(row, EventsStreamRow)
    self.assertEqual(row.type, "ev")
    self.assertIsInstance(row.data, EventsStreamEventRow)
    self.assertEqual(row.data.event_id, event.event_id)

    # Reset the data.
    self.test_handler.received_rdata_rows = []

    # Remember where the worker's events stream currently is.
    worker_events_stream = self.worker_hs.get_replication_streams()["events"]
    prev_token = worker_events_stream.current_token("master")

    # Manually send an old RDATA command, which should get dropped. This
    # re-uses the row from above, but with an earlier stream token.
    self.hs.get_replication_command_handler().send_command(
        RdataCommand("events", "master", 1, row)
    )

    # No updates should have been received (the command was discarded as old).
    events_rows = [
        r for r in self.test_handler.received_rdata_rows if r[0] == "events"
    ]
    self.assertEqual(len(events_rows), 0)

    # Ensure the stream has not gone backwards.
    current_token = worker_events_stream.current_token("master")
    self.assertGreaterEqual(current_token, prev_token)
def stream_update(self, stream_name, token, data):
    """Handle a new update that may need streaming to this client.

    Depending on the client's subscription state for the stream the
    update is either sent immediately, queued until the in-progress
    subscription completes, or dropped entirely.
    """
    if stream_name in self.replication_streams:
        # Fully subscribed: send the update straight down.
        self.send_command(RdataCommand(stream_name, token, data))
        return

    if stream_name in self.connecting_streams:
        # Subscription is still being set up: queue the update so it can
        # be replayed once the catch-up finishes.
        logger.debug("[%s] Queuing RDATA %r %r", self.id(), stream_name, token)
        self.pending_rdata.setdefault(stream_name, []).append((token, data))
        return

    # Not subscribed at all: drop the update.
    logger.debug("[%s] Dropping RDATA %r %r", self.id(), stream_name, token)
def stream_update(self, stream_name: str, token: Optional[int], data: Any) -> None:
    """Push a newly-available stream update out to Redis subscribers.

    Whether a given subscriber cares about the stream is checked on the
    receiving side; this simply emits an RDATA command for the update.
    """
    update_cmd = RdataCommand(stream_name, self._instance_name, token, data)
    self.send_command(update_cmd)
async def subscribe_to_stream(self, stream_name, token):
    """Subscribe the remote to a stream.

    This involves checking if they've missed anything and sending those
    updates down if they have. During that time new updates for the stream
    are queued and sent once we've sent down any missed updates.

    Args:
        stream_name: name of the stream the remote wants to subscribe to.
        token: the stream position the remote has already caught up to;
            updates are back-filled from here.
    """
    # Mark the stream as "connecting" (and not fully subscribed) so that
    # updates arriving during catch-up get queued in pending_rdata rather
    # than sent out of order.
    self.replication_streams.discard(stream_name)
    self.connecting_streams.add(stream_name)

    try:
        # Get missing updates
        updates, current_token = await self.streamer.get_stream_updates(
            stream_name, token
        )

        # Send all the missing updates
        for update in updates:
            token, row = update[0], update[1]
            self.send_command(RdataCommand(stream_name, token, row))

        # We send a POSITION command to ensure that they have an up to
        # date token (especially useful if we didn't send any updates
        # above)
        self.send_command(PositionCommand(stream_name, current_token))

        # Now we can send any updates that came in while we were subscribing
        pending_rdata = self.pending_rdata.pop(stream_name, [])
        updates = []
        for token, update in pending_rdata:
            # If the token is null, it is part of a batch update. Batches
            # are multiple updates that share a single token. To denote
            # this, the token is set to None for all tokens in the batch
            # except for the last. If we find a None token, we keep looking
            # through tokens until we find one that is not None and then
            # process all previous updates in the batch as if they had the
            # final token.
            if token is None:
                # Store this update as part of a batch
                updates.append(update)
                continue

            if token <= current_token:
                # This update or batch of updates is older than
                # current_token, dismiss it
                updates = []
                continue

            updates.append(update)

            # Send all updates that are part of this batch with the
            # found token
            for update in updates:
                self.send_command(RdataCommand(stream_name, token, update))

            # Clear stored updates
            updates = []

        # They're now fully subscribed
        self.replication_streams.add(stream_name)
    except Exception as e:
        # Best-effort error report back to the remote; the failure is also
        # logged locally with a traceback.
        logger.exception("[%s] Failed to handle REPLICATE command", self.id())
        self.send_error("failed to handle replicate: %r", e)
    finally:
        # Whether we succeeded or not, the stream is no longer "connecting".
        self.connecting_streams.discard(stream_name)