@defer.inlineCallbacks
def notify_interested_services(self, current_id):
    """Notifies (pushes) all application services interested in this event.

    Pushing is done asynchronously, so this method won't block for any
    prolonged length of time.

    Args:
        current_id(int): The current maximum ID.
    """
    services = self.store.get_app_services()
    if not services or not self.notify_appservices:
        return

    self.current_max = max(self.current_max, current_id)
    if self.is_processing:
        return

    with Measure(self.clock, "notify_interested_services"):
        self.is_processing = True
        try:
            limit = 100
            while True:
                upper_bound, events = yield self.store.get_new_events_for_appservice(
                    self.current_max, limit)

                if not events:
                    break

                events_by_room = {}
                for event in events:
                    events_by_room.setdefault(event.room_id, []).append(event)

                @defer.inlineCallbacks
                def handle_event(event):
                    # Gather interested services
                    services = yield self._get_services_for_event(event)
                    if len(services) == 0:
                        return  # no services need notifying

                    # Do we know this user exists? If not, poke the user
                    # query API for all services which match that user regex.
                    # This needs to block as these user queries need to be
                    # made BEFORE pushing the event.
                    yield self._check_user_exists(event.sender)
                    if event.type == EventTypes.Member:
                        yield self._check_user_exists(event.state_key)

                    if not self.started_scheduler:

                        def start_scheduler():
                            return self.scheduler.start().addErrback(
                                log_failure, "Application Services Failure")

                        run_as_background_process("as_scheduler", start_scheduler)
                        self.started_scheduler = True

                    # Fork off pushes to these services
                    for service in services:
                        self.scheduler.submit_event_for_as(service, event)

                @defer.inlineCallbacks
                def handle_room_events(events):
                    for event in events:
                        yield handle_event(event)

                yield make_deferred_yieldable(
                    defer.gatherResults(
                        [
                            run_in_background(handle_room_events, evs)
                            for evs in itervalues(events_by_room)
                        ],
                        consumeErrors=True,
                    ))

                yield self.store.set_appservice_last_pos(upper_bound)

                now = self.clock.time_msec()
                ts = yield self.store.get_received_ts(events[-1].event_id)

                synapse.metrics.event_processing_positions.labels(
                    "appservice_sender").set(upper_bound)

                events_processed_counter.inc(len(events))

                event_processing_loop_room_count.labels(
                    "appservice_sender").inc(len(events_by_room))

                event_processing_loop_counter.labels(
                    "appservice_sender").inc()

                synapse.metrics.event_processing_lag.labels(
                    "appservice_sender").set(now - ts)
                synapse.metrics.event_processing_last_ts.labels(
                    "appservice_sender").set(ts)
        finally:
            self.is_processing = False
async def _notify_interested_services(self, max_token: RoomStreamToken):
    with Measure(self.clock, "notify_interested_services"):
        self.is_processing = True
        try:
            limit = 100
            while True:
                (
                    upper_bound,
                    events,
                ) = await self.store.get_new_events_for_appservice(
                    self.current_max, limit)

                if not events:
                    break

                events_by_room = {}  # type: Dict[str, List[EventBase]]
                for event in events:
                    events_by_room.setdefault(event.room_id, []).append(event)

                async def handle_event(event):
                    # Gather interested services
                    services = await self._get_services_for_event(event)
                    if len(services) == 0:
                        return  # no services need notifying

                    # Do we know this user exists? If not, poke the user
                    # query API for all services which match that user regex.
                    # This needs to block as these user queries need to be
                    # made BEFORE pushing the event.
                    await self._check_user_exists(event.sender)
                    if event.type == EventTypes.Member:
                        await self._check_user_exists(event.state_key)

                    if not self.started_scheduler:

                        async def start_scheduler():
                            try:
                                return await self.scheduler.start()
                            except Exception:
                                logger.error("Application Services Failure")

                        run_as_background_process("as_scheduler", start_scheduler)
                        self.started_scheduler = True

                    # Fork off pushes to these services
                    for service in services:
                        self.scheduler.submit_event_for_as(service, event)

                    now = self.clock.time_msec()
                    ts = await self.store.get_received_ts(event.event_id)
                    synapse.metrics.event_processing_lag_by_event.labels(
                        "appservice_sender").observe((now - ts) / 1000)

                async def handle_room_events(events):
                    for event in events:
                        await handle_event(event)

                await make_deferred_yieldable(
                    defer.gatherResults(
                        [
                            run_in_background(handle_room_events, evs)
                            for evs in events_by_room.values()
                        ],
                        consumeErrors=True,
                    ))

                await self.store.set_appservice_last_pos(upper_bound)

                now = self.clock.time_msec()
                ts = await self.store.get_received_ts(events[-1].event_id)

                synapse.metrics.event_processing_positions.labels(
                    "appservice_sender").set(upper_bound)

                events_processed_counter.inc(len(events))

                event_processing_loop_room_count.labels(
                    "appservice_sender").inc(len(events_by_room))

                event_processing_loop_counter.labels(
                    "appservice_sender").inc()

                synapse.metrics.event_processing_lag.labels(
                    "appservice_sender").set(now - ts)
                synapse.metrics.event_processing_last_ts.labels(
                    "appservice_sender").set(ts)
        finally:
            self.is_processing = False
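Both the yield-based and async versions above share the same fan-out shape: events are bucketed by room, rooms are processed concurrently as background tasks, and events within a room are handled strictly in order. The following is a minimal, self-contained sketch of that shape using plain asyncio rather than Synapse's run_in_background/make_deferred_yieldable helpers; the event objects and the handle_event callback are placeholders, not Synapse APIs.

import asyncio
from collections import defaultdict

async def process_batch(events, handle_event):
    # Bucket events by room, mirroring events_by_room above.
    events_by_room = defaultdict(list)
    for event in events:
        events_by_room[event.room_id].append(event)

    async def handle_room_events(room_events):
        # Sequential within a room, so per-room ordering is preserved.
        for event in room_events:
            await handle_event(event)

    # Concurrent across rooms, analogous to gatherResults(consumeErrors=True);
    # return_exceptions=True keeps one failing room from cancelling the rest.
    return await asyncio.gather(
        *(handle_room_events(evs) for evs in events_by_room.values()),
        return_exceptions=True,
    )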
@defer.inlineCallbacks
def _process_event_queue_loop(self):
    try:
        self._is_processing = True
        while True:
            last_token = yield self.store.get_federation_out_pos("events")
            next_token, events = yield self.store.get_all_new_events_stream(
                last_token, self._last_poked_id, limit=100,
            )

            logger.debug("Handling %s -> %s", last_token, next_token)

            if not events and next_token >= self._last_poked_id:
                break

            @defer.inlineCallbacks
            def handle_event(event):
                # Only send events for this server.
                send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of()
                is_mine = self.is_mine_id(event.event_id)
                if not is_mine and send_on_behalf_of is None:
                    return

                try:
                    # Get the state from before the event.
                    # We need to make sure that this is the state from before
                    # the event and not from after it.
                    # Otherwise if the last member on a server in a room is
                    # banned then it won't receive the event because it won't
                    # be in the room after the ban.
                    destinations = yield self.state.get_current_hosts_in_room(
                        event.room_id,
                        latest_event_ids=[
                            prev_id for prev_id, _ in event.prev_events
                        ],
                    )
                except Exception:
                    logger.exception(
                        "Failed to calculate hosts in room for event: %s",
                        event.event_id,
                    )
                    return

                destinations = set(destinations)

                if send_on_behalf_of is not None:
                    # If we are sending the event on behalf of another server
                    # then it already has the event and there is no reason to
                    # send the event to it.
                    destinations.discard(send_on_behalf_of)

                logger.debug("Sending %s to %r", event, destinations)

                self._send_pdu(event, destinations)

            @defer.inlineCallbacks
            def handle_room_events(events):
                for event in events:
                    yield handle_event(event)

            events_by_room = {}
            for event in events:
                events_by_room.setdefault(event.room_id, []).append(event)

            yield logcontext.make_deferred_yieldable(
                defer.gatherResults([
                    logcontext.run_in_background(handle_room_events, evs)
                    for evs in itervalues(events_by_room)
                ], consumeErrors=True))

            yield self.store.update_federation_out_pos(
                "events", next_token)

            if events:
                now = self.clock.time_msec()
                ts = yield self.store.get_received_ts(events[-1].event_id)

                synapse.metrics.event_processing_lag.labels(
                    "federation_sender").set(now - ts)
                synapse.metrics.event_processing_last_ts.labels(
                    "federation_sender").set(ts)

                events_processed_counter.inc(len(events))

                event_processing_loop_room_count.labels(
                    "federation_sender").inc(len(events_by_room))

            event_processing_loop_counter.labels("federation_sender").inc()

            synapse.metrics.event_processing_positions.labels(
                "federation_sender").set(next_token)

    finally:
        self._is_processing = False
async def _process_event_queue_loop(self) -> None:
    try:
        self._is_processing = True
        while True:
            last_token = await self.store.get_federation_out_pos("events")
            next_token, events = await self.store.get_all_new_events_stream(
                last_token, self._last_poked_id, limit=100)

            logger.debug("Handling %s -> %s", last_token, next_token)

            if not events and next_token >= self._last_poked_id:
                break

            async def handle_event(event: EventBase) -> None:
                # Only send events for this server.
                send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of()
                is_mine = self.is_mine_id(event.sender)
                if not is_mine and send_on_behalf_of is None:
                    return

                if not event.internal_metadata.should_proactively_send():
                    return

                try:
                    # Get the state from before the event.
                    # We need to make sure that this is the state from before
                    # the event and not from after it.
                    # Otherwise if the last member on a server in a room is
                    # banned then it won't receive the event because it won't
                    # be in the room after the ban.
                    destinations = await self.state.get_hosts_in_room_at_events(
                        event.room_id, event_ids=event.prev_event_ids())
                except Exception:
                    logger.exception(
                        "Failed to calculate hosts in room for event: %s",
                        event.event_id,
                    )
                    return

                destinations = {
                    d
                    for d in destinations
                    if self._federation_shard_config.should_handle(
                        self._instance_name, d)
                }

                if send_on_behalf_of is not None:
                    # If we are sending the event on behalf of another server
                    # then it already has the event and there is no reason to
                    # send the event to it.
                    destinations.discard(send_on_behalf_of)

                logger.debug("Sending %s to %r", event, destinations)

                if destinations:
                    self._send_pdu(event, destinations)

                    now = self.clock.time_msec()
                    ts = await self.store.get_received_ts(event.event_id)

                    synapse.metrics.event_processing_lag_by_event.labels(
                        "federation_sender").observe((now - ts) / 1000)

            async def handle_room_events(
                    events: Iterable[EventBase]) -> None:
                with Measure(self.clock, "handle_room_events"):
                    for event in events:
                        await handle_event(event)

            events_by_room = {}  # type: Dict[str, List[EventBase]]
            for event in events:
                events_by_room.setdefault(event.room_id, []).append(event)

            await make_deferred_yieldable(
                defer.gatherResults(
                    [
                        run_in_background(handle_room_events, evs)
                        for evs in events_by_room.values()
                    ],
                    consumeErrors=True,
                ))

            await self.store.update_federation_out_pos(
                "events", next_token)

            if events:
                now = self.clock.time_msec()
                ts = await self.store.get_received_ts(events[-1].event_id)

                synapse.metrics.event_processing_lag.labels(
                    "federation_sender").set(now - ts)
                synapse.metrics.event_processing_last_ts.labels(
                    "federation_sender").set(ts)

                events_processed_counter.inc(len(events))

                event_processing_loop_room_count.labels(
                    "federation_sender").inc(len(events_by_room))

            event_processing_loop_counter.labels("federation_sender").inc()

            synapse.metrics.event_processing_positions.labels(
                "federation_sender").set(next_token)

    finally:
        self._is_processing = False
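The async federation loop above filters destinations through self._federation_shard_config.should_handle(self._instance_name, d), so that when several federation sender workers run, each remote server is owned by exactly one of them. The sketch below shows one plausible hash-based assignment; the ShardConfig class and its hashing scheme are illustrative assumptions, not Synapse's actual sharding implementation.

from hashlib import blake2b
from typing import List

class ShardConfig:
    def __init__(self, instances: List[str]):
        # Names of the federation sender instances, in a fixed order.
        self.instances = instances

    def should_handle(self, instance_name: str, destination: str) -> bool:
        if not self.instances:
            # Sharding disabled: a single sender handles every destination.
            return True
        # Hash the destination so every instance computes the same owner.
        digest = blake2b(destination.encode("utf-8")).digest()
        idx = int.from_bytes(digest[:8], "big") % len(self.instances)
        return self.instances[idx] == instance_name

# Example: with two senders, each destination is owned by exactly one instance.
config = ShardConfig(["sender-1", "sender-2"])
owned = [d for d in ("matrix.org", "example.com") if config.should_handle("sender-1", d)]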
@defer.inlineCallbacks
def _process_event_queue_loop(self):
    try:
        self._is_processing = True
        while True:
            last_token = yield self.store.get_federation_out_pos("events")
            next_token, events = yield self.store.get_all_new_events_stream(
                last_token, self._last_poked_id, limit=100,
            )

            logger.debug("Handling %s -> %s", last_token, next_token)

            if not events and next_token >= self._last_poked_id:
                break

            @defer.inlineCallbacks
            def handle_event(event):
                # Only send events for this server.
                send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of()
                is_mine = self.is_mine_id(event.sender)
                if not is_mine and send_on_behalf_of is None:
                    return

                try:
                    # Get the state from before the event.
                    # We need to make sure that this is the state from before
                    # the event and not from after it.
                    # Otherwise if the last member on a server in a room is
                    # banned then it won't receive the event because it won't
                    # be in the room after the ban.
                    destinations = yield self.state.get_current_hosts_in_room(
                        event.room_id, latest_event_ids=event.prev_event_ids(),
                    )
                except Exception:
                    logger.exception(
                        "Failed to calculate hosts in room for event: %s",
                        event.event_id,
                    )
                    return

                destinations = set(destinations)

                if send_on_behalf_of is not None:
                    # If we are sending the event on behalf of another server
                    # then it already has the event and there is no reason to
                    # send the event to it.
                    destinations.discard(send_on_behalf_of)

                logger.debug("Sending %s to %r", event, destinations)

                self._send_pdu(event, destinations)

            @defer.inlineCallbacks
            def handle_room_events(events):
                for event in events:
                    yield handle_event(event)

            events_by_room = {}
            for event in events:
                events_by_room.setdefault(event.room_id, []).append(event)

            yield logcontext.make_deferred_yieldable(defer.gatherResults(
                [
                    logcontext.run_in_background(handle_room_events, evs)
                    for evs in itervalues(events_by_room)
                ],
                consumeErrors=True
            ))

            yield self.store.update_federation_out_pos(
                "events", next_token
            )

            if events:
                now = self.clock.time_msec()
                ts = yield self.store.get_received_ts(events[-1].event_id)

                synapse.metrics.event_processing_lag.labels(
                    "federation_sender").set(now - ts)
                synapse.metrics.event_processing_last_ts.labels(
                    "federation_sender").set(ts)

                events_processed_counter.inc(len(events))

                event_processing_loop_room_count.labels(
                    "federation_sender"
                ).inc(len(events_by_room))

            event_processing_loop_counter.labels("federation_sender").inc()

            synapse.metrics.event_processing_positions.labels(
                "federation_sender").set(next_token)

    finally:
        self._is_processing = False
@defer.inlineCallbacks
def notify_interested_services(self, current_id):
    """Notifies (pushes) all application services interested in this event.

    Pushing is done asynchronously, so this method won't block for any
    prolonged length of time.

    Args:
        current_id(int): The current maximum ID.
    """
    services = self.store.get_app_services()
    if not services or not self.notify_appservices:
        return

    self.current_max = max(self.current_max, current_id)
    if self.is_processing:
        return

    with Measure(self.clock, "notify_interested_services"):
        self.is_processing = True
        try:
            limit = 100
            while True:
                upper_bound, events = yield self.store.get_new_events_for_appservice(
                    self.current_max, limit
                )

                if not events:
                    break

                events_by_room = {}
                for event in events:
                    events_by_room.setdefault(event.room_id, []).append(event)

                @defer.inlineCallbacks
                def handle_event(event):
                    # Gather interested services
                    services = yield self._get_services_for_event(event)
                    if len(services) == 0:
                        return  # no services need notifying

                    # Do we know this user exists? If not, poke the user
                    # query API for all services which match that user regex.
                    # This needs to block as these user queries need to be
                    # made BEFORE pushing the event.
                    yield self._check_user_exists(event.sender)
                    if event.type == EventTypes.Member:
                        yield self._check_user_exists(event.state_key)

                    if not self.started_scheduler:
                        def start_scheduler():
                            return self.scheduler.start().addErrback(
                                log_failure, "Application Services Failure",
                            )

                        run_as_background_process("as_scheduler", start_scheduler)
                        self.started_scheduler = True

                    # Fork off pushes to these services
                    for service in services:
                        self.scheduler.submit_event_for_as(service, event)

                @defer.inlineCallbacks
                def handle_room_events(events):
                    for event in events:
                        yield handle_event(event)

                yield make_deferred_yieldable(defer.gatherResults([
                    run_in_background(handle_room_events, evs)
                    for evs in itervalues(events_by_room)
                ], consumeErrors=True))

                yield self.store.set_appservice_last_pos(upper_bound)

                now = self.clock.time_msec()
                ts = yield self.store.get_received_ts(events[-1].event_id)

                synapse.metrics.event_processing_positions.labels(
                    "appservice_sender").set(upper_bound)

                events_processed_counter.inc(len(events))

                event_processing_loop_room_count.labels(
                    "appservice_sender"
                ).inc(len(events_by_room))

                event_processing_loop_counter.labels("appservice_sender").inc()

                synapse.metrics.event_processing_lag.labels(
                    "appservice_sender").set(now - ts)
                synapse.metrics.event_processing_last_ts.labels(
                    "appservice_sender").set(ts)
        finally:
            self.is_processing = False
async def _process_event_queue_loop(self) -> None:
    try:
        self._is_processing = True
        while True:
            last_token = await self.store.get_federation_out_pos("events")
            next_token, events = await self.store.get_all_new_events_stream(
                last_token, self._last_poked_id, limit=100
            )

            logger.debug(
                "Handling %i -> %i: %i events to send (current id %i)",
                last_token,
                next_token,
                len(events),
                self._last_poked_id,
            )

            if not events and next_token >= self._last_poked_id:
                logger.debug("All events processed")
                break

            async def handle_event(event: EventBase) -> None:
                # Only send events for this server.
                send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of()
                is_mine = self.is_mine_id(event.sender)
                if not is_mine and send_on_behalf_of is None:
                    logger.debug("Not sending remote-origin event %s", event)
                    return

                # We also want to not send out-of-band membership events.
                #
                # OOB memberships are used in three (and a half) situations:
                #
                # (1) invite events which we have received over federation. Those
                #     will have a `sender` on a different server, so will be
                #     skipped by the "is_mine" test above anyway.
                #
                # (2) rejections of invites to federated rooms - either remotely
                #     or locally generated. (Such rejections are normally
                #     created via federation, in which case the remote server is
                #     responsible for sending out the rejection. If that fails,
                #     we'll create a leave event locally, but that's only really
                #     for the benefit of the invited user - we don't have enough
                #     information to send it out over federation).
                #
                # (2a) rescinded knocks. These are identical to rejected invites.
                #
                # (3) knock events which we have sent over federation. As with
                #     invite rejections, the remote server should send them out to
                #     the federation.
                #
                # So, in all the above cases, we want to ignore such events.
                #
                # OOB memberships are always(?) outliers anyway, so if we *don't*
                # ignore them, we'll get an exception further down when we try to
                # fetch the membership list for the room.
                #
                # Arguably, we could equivalently ignore all outliers here, since
                # in theory the only way for an outlier with a local `sender` to
                # exist is by being an OOB membership (via one of (2), (2a) or (3)
                # above).
                #
                if event.internal_metadata.is_out_of_band_membership():
                    logger.debug("Not sending OOB membership event %s", event)
                    return

                # Finally, there are some other events that we should not send out
                # until someone asks for them. They are explicitly flagged as such
                # with `proactively_send: False`.
                if not event.internal_metadata.should_proactively_send():
                    logger.debug(
                        "Not sending event with proactively_send=false: %s", event
                    )
                    return

                destinations: Optional[Collection[str]] = None
                if not event.prev_event_ids():
                    # If there are no prev event IDs then the state is empty
                    # and so no remote servers in the room
                    destinations = set()
                else:
                    # We check the external cache for the destinations, which is
                    # stored per state group.
                    sg = await self._external_cache.get(
                        "event_to_prev_state_group", event.event_id
                    )
                    if sg:
                        destinations = await self._external_cache.get(
                            "get_joined_hosts", str(sg)
                        )

                if destinations is None:
                    try:
                        # Get the state from before the event.
                        # We need to make sure that this is the state from before
                        # the event and not from after it.
                        # Otherwise if the last member on a server in a room is
                        # banned then it won't receive the event because it won't
                        # be in the room after the ban.
                        destinations = await self.state.get_hosts_in_room_at_events(
                            event.room_id, event_ids=event.prev_event_ids()
                        )
                    except Exception:
                        logger.exception(
                            "Failed to calculate hosts in room for event: %s",
                            event.event_id,
                        )
                        return

                sharded_destinations = {
                    d
                    for d in destinations
                    if self._federation_shard_config.should_handle(
                        self._instance_name, d
                    )
                }

                if send_on_behalf_of is not None:
                    # If we are sending the event on behalf of another server
                    # then it already has the event and there is no reason to
                    # send the event to it.
                    sharded_destinations.discard(send_on_behalf_of)

                logger.debug("Sending %s to %r", event, sharded_destinations)

                if sharded_destinations:
                    await self._send_pdu(event, sharded_destinations)

                    now = self.clock.time_msec()
                    ts = await self.store.get_received_ts(event.event_id)
                    assert ts is not None

                    synapse.metrics.event_processing_lag_by_event.labels(
                        "federation_sender"
                    ).observe((now - ts) / 1000)

            async def handle_room_events(events: List[EventBase]) -> None:
                logger.debug(
                    "Handling %i events in room %s", len(events), events[0].room_id
                )
                with Measure(self.clock, "handle_room_events"):
                    for event in events:
                        await handle_event(event)

            events_by_room: Dict[str, List[EventBase]] = {}
            for event in events:
                events_by_room.setdefault(event.room_id, []).append(event)

            await make_deferred_yieldable(
                defer.gatherResults(
                    [
                        run_in_background(handle_room_events, evs)
                        for evs in events_by_room.values()
                    ],
                    consumeErrors=True,
                )
            )

            logger.debug("Successfully handled up to %i", next_token)

            await self.store.update_federation_out_pos("events", next_token)

            if events:
                now = self.clock.time_msec()
                ts = await self.store.get_received_ts(events[-1].event_id)
                assert ts is not None

                synapse.metrics.event_processing_lag.labels(
                    "federation_sender"
                ).set(now - ts)
                synapse.metrics.event_processing_last_ts.labels(
                    "federation_sender"
                ).set(ts)

                events_processed_counter.inc(len(events))

                event_processing_loop_room_count.labels("federation_sender").inc(
                    len(events_by_room)
                )

            event_processing_loop_counter.labels("federation_sender").inc()

            synapse.metrics.event_processing_positions.labels(
                "federation_sender"
            ).set(next_token)

    finally:
        self._is_processing = False
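Every generation of these loops follows the same resumable-stream pattern: read a persisted position, fetch a bounded batch of newer events, process it, then persist the new position (set_appservice_last_pos / update_federation_out_pos above) so a restart resumes where the last completed batch left off. The following is a generic sketch of that pattern, assuming a hypothetical store object with get_position/get_new_events/set_position methods rather than Synapse's storage API.

async def process_stream(store, handle_batch, stream_name: str, limit: int = 100) -> None:
    while True:
        last_token = await store.get_position(stream_name)
        next_token, events = await store.get_new_events(last_token, limit=limit)
        if not events:
            break

        await handle_batch(events)

        # Advance the persisted position only after the whole batch succeeded,
        # so a crash re-delivers the batch instead of silently skipping it.
        await store.set_position(stream_name, next_token)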