def _set_before_and_after(events, rows):
    """Annotate each event's internal metadata with pagination tokens.

    For every (event, row) pair, sets `before` and `after` to stringified
    RoomStreamTokens bracketing the event's stream position, using the
    event's depth as the topological part.

    Args:
        events (list): events to annotate, parallel to `rows`.
        rows (list[dict]): DB rows, each containing "stream_ordering".
    """
    for ev, db_row in zip(events, rows):
        stream_pos = db_row["stream_ordering"]
        depth = ev.depth
        meta = ev.internal_metadata
        meta.before = str(RoomStreamToken(depth, stream_pos - 1))
        meta.after = str(RoomStreamToken(depth, stream_pos))
def get_membership_changes_for_user(self, user_id, from_key, to_key):
    """Fetch the user's membership events in the stream range (from_key, to_key].

    Generator-based Twisted code: yields Deferreds, so callers receive a
    Deferred of a list of events with before/after tokens attached.

    Args:
        user_id (str)
        from_key (str): stream token; events strictly after this are returned.
        to_key (str): stream token; events up to and including this are returned.
    """
    from_id = RoomStreamToken.parse_stream_token(from_key).stream
    to_id = RoomStreamToken.parse_stream_token(to_key).stream

    if from_key == to_key:
        # Empty range: nothing can have changed.
        return []

    if from_id:
        # Cheap cache check: skip the DB query entirely if the user's
        # membership stream hasn't advanced since `from_id`.
        has_changed = self._membership_stream_cache.has_entity_changed(
            user_id, int(from_id))
        if not has_changed:
            return []

    def f(txn):
        # Join events with room_memberships to select only this user's
        # membership events inside the stream window, ascending.
        sql = ("SELECT m.event_id, stream_ordering FROM events AS e,"
               " room_memberships AS m"
               " WHERE e.event_id = m.event_id"
               " AND m.user_id = ?"
               " AND e.stream_ordering > ? AND e.stream_ordering <= ?"
               " ORDER BY e.stream_ordering ASC")
        txn.execute(sql, (user_id, from_id, to_id))

        # topological_ordering is not selected here, hence None.
        rows = [_EventDictReturn(row[0], None, row[1]) for row in txn]

        return rows

    rows = yield self.runInteraction("get_membership_changes_for_user", f)

    ret = yield self.get_events_as_list(
        [r.event_id for r in rows], get_prev_content=True)

    # Rows are stream-ordered, so attach tokens without a topological part.
    self._set_before_and_after(ret, rows, topo_order=False)

    return ret
def _get_events_around_txn(
    self, txn, room_id, event_id, before_limit, after_limit, event_filter
):
    """Retrieves event_ids and pagination tokens around a given event in a
    room.

    Args:
        room_id (str)
        event_id (str): the anchor event.
        before_limit (int): max events to return before the anchor.
        after_limit (int): max events to return after the anchor.
        event_filter (Filter|None)

    Returns:
        dict: with keys "before" and "after", each mapping to a dict of
        "event_ids" (list) and "token" (pagination token for that side).
    """
    # Look up the anchor event's position in both orderings.
    results = self._simple_select_one_txn(
        txn,
        "events",
        keyvalues={"event_id": event_id, "room_id": room_id},
        retcols=["stream_ordering", "topological_ordering"],
    )

    # Paginating backwards includes the event at the token, but paginating
    # forward doesn't.
    before_token = RoomStreamToken(
        results["topological_ordering"] - 1, results["stream_ordering"]
    )

    after_token = RoomStreamToken(
        results["topological_ordering"], results["stream_ordering"]
    )

    # Walk backwards from just before the anchor.
    rows, start_token = self._paginate_room_events_txn(
        txn, room_id, before_token, direction='b', limit=before_limit,
        event_filter=event_filter,
    )
    events_before = [r.event_id for r in rows]

    # Walk forwards from the anchor.
    rows, end_token = self._paginate_room_events_txn(
        txn, room_id, after_token, direction='f', limit=after_limit,
        event_filter=event_filter,
    )
    events_after = [r.event_id for r in rows]

    return {
        "before": {"event_ids": events_before, "token": start_token},
        "after": {"event_ids": events_after, "token": end_token},
    }
def get_recent_event_ids_for_room(self, room_id, limit, end_token, from_token=None):
    """Fetch up to `limit` of the most recent non-outlier events in a room.

    Events are selected in reverse topological order up to `end_token`,
    optionally bounded below by `from_token`, then returned ascending.

    Args:
        room_id (str)
        limit (int)
        end_token (str): stream token bounding the results from above.
        from_token (str|None): if given, only events strictly after this
            stream position are returned.

    Returns:
        Deferred[tuple]: (rows, (start_token, end_token)) where rows are
        dicts with stream_ordering/topological_ordering/event_id.
    """
    end_token = RoomStreamToken.parse_stream_token(end_token)

    if from_token is None:
        sql = (
            "SELECT stream_ordering, topological_ordering, event_id"
            " FROM events"
            " WHERE room_id = ? AND stream_ordering <= ? AND outlier = ?"
            " ORDER BY topological_ordering DESC, stream_ordering DESC"
            " LIMIT ?")
    else:
        from_token = RoomStreamToken.parse_stream_token(from_token)
        sql = ("SELECT stream_ordering, topological_ordering, event_id"
               " FROM events"
               " WHERE room_id = ? AND stream_ordering > ?"
               " AND stream_ordering <= ? AND outlier = ?"
               " ORDER BY topological_ordering DESC, stream_ordering DESC"
               " LIMIT ?")

    def get_recent_events_for_room_txn(txn):
        if from_token is None:
            txn.execute(sql, (
                room_id, end_token.stream, False, limit,
            ))
        else:
            txn.execute(sql, (room_id, from_token.stream, end_token.stream, False, limit))

        rows = self.cursor_to_dict(txn)

        rows.reverse()  # As we selected with reverse ordering

        if rows:
            # Tokens are positions between events.
            # This token points *after* the last event in the chunk.
            # We need it to point to the event before it in the chunk
            # since we are going backwards so we subtract one from the
            # stream part.
            topo = rows[0]["topological_ordering"]
            toke = rows[0]["stream_ordering"] - 1
            start_token = str(RoomStreamToken(topo, toke))

            token = (start_token, str(end_token))
        else:
            # No events in range: both ends of the chunk are `end_token`.
            token = (str(end_token), str(end_token))

        return rows, token

    return self.runInteraction("get_recent_events_for_room", get_recent_events_for_room_txn)
def get_recent_event_ids_for_room(self, room_id, limit, end_token, from_token=None):
    """Fetch up to `limit` of the most recent non-outlier events in a room.

    Events are selected in reverse topological order up to `end_token`,
    optionally bounded below by `from_token`, then returned ascending.

    Args:
        room_id (str)
        limit (int)
        end_token (str): stream token bounding the results from above.
        from_token (str|None): if given, only events strictly after this
            stream position are returned.

    Returns:
        Deferred[tuple]: (rows, (start_token, end_token)) where rows are
        dicts with stream_ordering/topological_ordering/event_id.
    """
    end_token = RoomStreamToken.parse_stream_token(end_token)

    if from_token is None:
        sql = (
            "SELECT stream_ordering, topological_ordering, event_id"
            " FROM events"
            " WHERE room_id = ? AND stream_ordering <= ? AND outlier = ?"
            " ORDER BY topological_ordering DESC, stream_ordering DESC"
            " LIMIT ?"
        )
    else:
        from_token = RoomStreamToken.parse_stream_token(from_token)
        sql = (
            "SELECT stream_ordering, topological_ordering, event_id"
            " FROM events"
            " WHERE room_id = ? AND stream_ordering > ?"
            " AND stream_ordering <= ? AND outlier = ?"
            " ORDER BY topological_ordering DESC, stream_ordering DESC"
            " LIMIT ?"
        )

    def get_recent_events_for_room_txn(txn):
        if from_token is None:
            txn.execute(sql, (room_id, end_token.stream, False, limit,))
        else:
            txn.execute(sql, (
                room_id, from_token.stream, end_token.stream, False, limit
            ))

        rows = self.cursor_to_dict(txn)

        rows.reverse()  # As we selected with reverse ordering

        if rows:
            # Tokens are positions between events.
            # This token points *after* the last event in the chunk.
            # We need it to point to the event before it in the chunk
            # since we are going backwards so we subtract one from the
            # stream part.
            topo = rows[0]["topological_ordering"]
            toke = rows[0]["stream_ordering"] - 1
            start_token = str(RoomStreamToken(topo, toke))

            token = (start_token, str(end_token))
        else:
            # No events in range: both ends of the chunk are `end_token`.
            token = (str(end_token), str(end_token))

        return rows, token

    return self.runInteraction(
        "get_recent_events_for_room", get_recent_events_for_room_txn
    )
def get_membership_changes_for_user(self, user_id, from_key, to_key):
    """Fetch the user's membership events up to `to_key`.

    If `from_key` is given, only events strictly after it are returned;
    otherwise all membership events up to `to_key` are fetched.

    Args:
        user_id (str)
        from_key (str|None): lower-bound stream token (exclusive).
        to_key (str): upper-bound stream token (inclusive).

    Returns (via defer.returnValue):
        list of events with before/after tokens set on internal metadata.
    """
    if from_key is not None:
        from_id = RoomStreamToken.parse_stream_token(from_key).stream
    else:
        from_id = None
    to_id = RoomStreamToken.parse_stream_token(to_key).stream

    if from_key == to_key:
        # Empty range: nothing can have changed.
        defer.returnValue([])

    if from_id:
        # Skip the DB entirely if the membership stream cache says the
        # user hasn't changed since `from_id`.
        has_changed = self._membership_stream_cache.has_entity_changed(
            user_id, int(from_id)
        )
        if not has_changed:
            defer.returnValue([])

    def f(txn):
        if from_id is not None:
            # Bounded window: (from_id, to_id].
            sql = (
                "SELECT m.event_id, stream_ordering FROM events AS e,"
                " room_memberships AS m"
                " WHERE e.event_id = m.event_id"
                " AND m.user_id = ?"
                " AND e.stream_ordering > ? AND e.stream_ordering <= ?"
                " ORDER BY e.stream_ordering ASC"
            )
            txn.execute(sql, (user_id, from_id, to_id,))
        else:
            # No lower bound: everything up to to_id.
            sql = (
                "SELECT m.event_id, stream_ordering FROM events AS e,"
                " room_memberships AS m"
                " WHERE e.event_id = m.event_id"
                " AND m.user_id = ?"
                " AND stream_ordering <= ?"
                " ORDER BY stream_ordering ASC"
            )
            txn.execute(sql, (user_id, to_id,))

        rows = self.cursor_to_dict(txn)

        return rows

    rows = yield self.runInteraction("get_membership_changes_for_user", f)

    ret = yield self._get_events(
        [r["event_id"] for r in rows], get_prev_content=True
    )

    # Rows are stream-ordered, so no topological part in the tokens.
    self._set_before_and_after(ret, rows, topo_order=False)

    defer.returnValue(ret)
def get_membership_changes_for_user(self, user_id, from_key, to_key):
    """Fetch the user's membership events up to `to_key`.

    If `from_key` is given, only events strictly after it are returned;
    otherwise all membership events up to `to_key` are fetched.

    Args:
        user_id (str)
        from_key (str|None): lower-bound stream token (exclusive).
        to_key (str): upper-bound stream token (inclusive).

    Returns (via defer.returnValue):
        list of events with before/after tokens set on internal metadata.
    """
    if from_key is not None:
        from_id = RoomStreamToken.parse_stream_token(from_key).stream
    else:
        from_id = None
    to_id = RoomStreamToken.parse_stream_token(to_key).stream

    if from_key == to_key:
        # Empty range: nothing can have changed.
        defer.returnValue([])

    if from_id:
        # Skip the DB entirely if the membership stream cache says the
        # user hasn't changed since `from_id`.
        has_changed = self._membership_stream_cache.has_entity_changed(
            user_id, int(from_id))
        if not has_changed:
            defer.returnValue([])

    def f(txn):
        if from_id is not None:
            # Bounded window: (from_id, to_id].
            sql = ("SELECT m.event_id, stream_ordering FROM events AS e,"
                   " room_memberships AS m"
                   " WHERE e.event_id = m.event_id"
                   " AND m.user_id = ?"
                   " AND e.stream_ordering > ? AND e.stream_ordering <= ?"
                   " ORDER BY e.stream_ordering ASC")
            txn.execute(sql, (
                user_id, from_id, to_id,
            ))
        else:
            # No lower bound: everything up to to_id.
            sql = ("SELECT m.event_id, stream_ordering FROM events AS e,"
                   " room_memberships AS m"
                   " WHERE e.event_id = m.event_id"
                   " AND m.user_id = ?"
                   " AND stream_ordering <= ?"
                   " ORDER BY stream_ordering ASC")
            txn.execute(sql, (
                user_id, to_id,
            ))

        rows = self.cursor_to_dict(txn)

        return rows

    rows = yield self.runInteraction("get_membership_changes_for_user", f)

    ret = yield self._get_events([r["event_id"] for r in rows], get_prev_content=True)

    # Rows are stream-ordered, so no topological part in the tokens.
    self._set_before_and_after(ret, rows, topo_order=False)

    defer.returnValue(ret)
def _set_before_and_after(events, rows, topo_order=True):
    """Attach pagination tokens and a sort key to each event's metadata.

    Args:
        events (list): events to annotate, parallel to `rows`.
        rows (list[dict]): DB rows, each containing "stream_ordering".
        topo_order (bool): if True, use the event's depth as the
            topological part of the tokens; otherwise leave it out.
    """
    for ev, db_row in zip(events, rows):
        stream_pos = db_row["stream_ordering"]
        topo_pos = ev.depth if topo_order else None
        meta = ev.internal_metadata
        meta.before = str(RoomStreamToken(topo_pos, stream_pos - 1))
        meta.after = str(RoomStreamToken(topo_pos, stream_pos))
        # Sort key: (topological, stream); a missing/zero topo part sorts
        # as 0, matching the original behaviour.
        meta.order = (int(topo_pos) if topo_pos else 0, int(stream_pos))
async def paginate_room_events(
    self,
    room_id: str,
    from_key: str,
    to_key: Optional[str] = None,
    direction: str = "b",
    limit: int = -1,
    event_filter: Optional[Filter] = None,
) -> Tuple[List[EventBase], str]:
    """Returns list of events before or after a given token.

    Args:
        room_id
        from_key: The token used to stream from
        to_key: A token which if given limits the results to only those before
        direction: Either 'b' or 'f' to indicate whether we are paginating
            forwards or backwards from `from_key`.
        limit: The maximum number of events to return.
        event_filter: If provided filters the events to those that match the
            filter.

    Returns:
        The results as a list of events and a token that points to the end
        of the result set. If no events are returned then the end of the
        stream has been reached (i.e. there are no events between `from_key`
        and `to_key`).
    """
    # Parse the string tokens into RoomStreamTokens before hitting the DB.
    from_key = RoomStreamToken.parse(from_key)
    to_key = RoomStreamToken.parse(to_key) if to_key else to_key

    rows, token = await self.db_pool.runInteraction(
        "paginate_room_events",
        self._paginate_room_events_txn,
        room_id,
        from_key,
        to_key,
        direction,
        limit,
        event_filter,
    )

    # Hydrate the event_ids returned by the txn into full events.
    event_ids = [entry.event_id for entry in rows]
    events = await self.get_events_as_list(event_ids, get_prev_content=True)

    self._set_before_and_after(events, rows)

    return (events, token)
def paginate_room_events(self, room_id, from_key, to_key=None,
                         direction='b', limit=-1, event_filter=None):
    """Returns list of events before or after a given token.

    Args:
        room_id (str)
        from_key (str): The token used to stream from
        to_key (str|None): A token which if given limits the results to
            only those before
        direction(char): Either 'b' or 'f' to indicate whether we are
            paginating forwards or backwards from `from_key`.
        limit (int): The maximum number of events to return. Zero or less
            means no limit.
        event_filter (Filter|None): If provided filters the events to
            those that match the filter.

    Returns:
        tuple[list[dict], str]: Returns the results as a list of dicts and
        a token that points to the end of the result set. The dicts have
        the keys "event_id", "topological_ordering" and "stream_ordering".
    """
    # Parse the string tokens into RoomStreamTokens before hitting the DB.
    from_key = RoomStreamToken.parse(from_key)
    if to_key:
        to_key = RoomStreamToken.parse(to_key)

    rows, token = yield self.runInteraction(
        "paginate_room_events", self._paginate_room_events_txn,
        room_id, from_key, to_key, direction, limit, event_filter,
    )

    # Hydrate the returned event_ids into full events.
    events = yield self._get_events(
        [r.event_id for r in rows],
        get_prev_content=True
    )

    self._set_before_and_after(events, rows)

    defer.returnValue((events, token))
def paginate_room_events(self, room_id, from_key, to_key=None, direction="b",
                         limit=-1, event_filter=None):
    """Returns list of events before or after a given token.

    Args:
        room_id (str)
        from_key (str): The token used to stream from
        to_key (str|None): A token which if given limits the results to
            only those before
        direction(char): Either 'b' or 'f' to indicate whether we are
            paginating forwards or backwards from `from_key`.
        limit (int): The maximum number of events to return.
        event_filter (Filter|None): If provided filters the events to
            those that match the filter.

    Returns:
        tuple[list[FrozenEvent], str]: Returns the results as a list of
        events and a token that points to the end of the result set. If no
        events are returned then the end of the stream has been reached
        (i.e. there are no events between `from_key` and `to_key`).
    """
    # Parse the string tokens into RoomStreamTokens before hitting the DB.
    from_key = RoomStreamToken.parse(from_key)
    if to_key:
        to_key = RoomStreamToken.parse(to_key)

    rows, token = yield self.runInteraction(
        "paginate_room_events", self._paginate_room_events_txn,
        room_id, from_key, to_key, direction, limit, event_filter,
    )

    # Hydrate the returned event_ids into full events.
    events = yield self.get_events_as_list(
        [r.event_id for r in rows], get_prev_content=True
    )

    self._set_before_and_after(events, rows)

    return (events, token)
def get_recent_event_ids_for_room(self, room_id, limit, end_token):
    """Get the most recent events in the room in topological ordering.

    Args:
        room_id (str)
        limit (int)
        end_token (str): The stream token representing now.

    Returns:
        Deferred[tuple[list[_EventDictReturn], str]]: Returns a list of
        _EventDictReturn and a token pointing to the start of the returned
        events. The events returned are in ascending order.
    """
    # A zero limit is a no-op: hand back the caller's token unchanged.
    if limit == 0:
        defer.returnValue(([], end_token))

    parsed_end = RoomStreamToken.parse(end_token)

    rows, token = yield self.runInteraction(
        "get_recent_event_ids_for_room",
        self._paginate_room_events_txn,
        room_id,
        from_token=parsed_end,
        limit=limit,
    )

    # The txn paginates backwards; flip to ascending for callers.
    rows.reverse()

    defer.returnValue((rows, token))
def _get_unread_event_push_actions_by_room(txn):
    # Transaction body: compute unread notify/highlight counts for the
    # closed-over (room_id, user_id) after `last_read_event_id`.
    # NOTE(review): room_id, last_read_event_id, user_id, lower_bound and
    # self come from the enclosing scope, not visible here.

    # Locate the read marker's position in the room.
    sql = ("SELECT stream_ordering, topological_ordering"
           " FROM events"
           " WHERE room_id = ? AND event_id = ?")
    txn.execute(sql, (room_id, last_read_event_id))
    results = txn.fetchall()
    if len(results) == 0:
        # Read marker event unknown: report zero counts.
        return {"notify_count": 0, "highlight_count": 0}

    stream_ordering = results[0][0]
    topological_ordering = results[0][1]
    token = RoomStreamToken(topological_ordering, stream_ordering)

    # Sum notifications/highlights strictly after the read marker.
    # lower_bound() renders the token comparison as engine-specific SQL.
    sql = ("SELECT sum(notif), sum(highlight)"
           " FROM event_push_actions ea"
           " WHERE"
           " user_id = ?"
           " AND room_id = ?"
           " AND %s") % (lower_bound(
               token, self.database_engine, inclusive=False), )
    txn.execute(sql, (user_id, room_id))
    row = txn.fetchone()
    if row:
        # SUM() yields NULL (None) when no rows match; coerce to 0.
        return {
            "notify_count": row[0] or 0,
            "highlight_count": row[1] or 0,
        }
    else:
        return {"notify_count": 0, "highlight_count": 0}
def get_room_events_stream_for_rooms(self, room_ids, from_key, to_key,
                                     limit=0, order='DESC'):
    """Get new room events in stream ordering since `from_key` for a set
    of rooms, fetched in parallel batches.

    Args:
        room_ids (iterable[str])
        from_key (str): stream token; no events before it are returned.
        to_key (str): stream token; no events after it are returned.
        limit (int): max events per room.
        order (str): "DESC" or "ASC"; which events win when limited.

    Returns (via defer.returnValue):
        dict mapping room_id to that room's (events, start token) result.
    """
    from_id = RoomStreamToken.parse_stream_token(from_key).stream

    # Narrow to rooms the stream cache says may have changed.
    room_ids = yield self._events_stream_cache.get_entities_changed(
        room_ids, from_id)

    if not room_ids:
        defer.returnValue({})

    results = {}
    room_ids = list(room_ids)
    # Query in batches of 20 rooms; presumably to cap concurrent DB work
    # — TODO confirm the rationale for the batch size.
    for rm_ids in (room_ids[i:i + 20] for i in range(0, len(room_ids), 20)):
        res = yield make_deferred_yieldable(
            defer.gatherResults([
                run_in_background(
                    self.get_room_events_stream_for_room,
                    room_id, from_key, to_key, limit, order=order,
                )
                for room_id in rm_ids
            ], consumeErrors=True))
        results.update(dict(zip(rm_ids, res)))

    defer.returnValue(results)
def get_room_max_token(self) -> RoomStreamToken:
    """Get a `RoomStreamToken` that marks the current maximum persisted
    position of the events stream. Useful to get a token that represents
    "now".

    The token returned is a "live" token that may have an instance_map
    component.
    """
    id_gen = self._stream_id_gen
    floor = id_gen.get_current_token()

    instance_positions = {}
    if isinstance(id_gen, MultiWriterIdGenerator):
        # `floor` is the minimum position that we know all instances have
        # finished persisting to, so we only record instances whose
        # positions are ahead of it. (Instance positions can be behind the
        # min position as there are times we can work out that the minimum
        # position is ahead of the naive minimum across all current
        # positions. See MultiWriterIdGenerator for details.)
        for instance, pos in id_gen.get_positions().items():
            if pos > floor:
                instance_positions[instance] = pos

    return RoomStreamToken(None, floor, frozendict(instance_positions))
async def get_recent_event_ids_for_room(
    self, room_id: str, limit: int, end_token: str
) -> Tuple[List[_EventDictReturn], str]:
    """Get the most recent events in the room in topological ordering.

    Args:
        room_id
        limit
        end_token: The stream token representing now.

    Returns:
        A list of _EventDictReturn and a token pointing to the start of
        the returned events. The events returned are in ascending order.
    """
    # A zero limit is a no-op: hand back the caller's token unchanged.
    if limit == 0:
        return [], end_token

    parsed_end = RoomStreamToken.parse(end_token)

    rows, token = await self.db_pool.runInteraction(
        "get_recent_event_ids_for_room",
        self._paginate_room_events_txn,
        room_id,
        from_token=parsed_end,
        limit=limit,
    )

    # The txn paginates backwards; flip to ascending for callers.
    rows.reverse()

    return rows, token
def get_recent_events_for_room_txn(txn):
    # Transaction body: run the closed-over `sql` with the right parameter
    # tuple depending on whether a lower bound (`from_token`) was given.
    # NOTE(review): sql, room_id, end_token, from_token, limit and self
    # come from the enclosing scope, not visible here.
    if from_token is None:
        txn.execute(sql, (
            room_id, end_token.stream, False, limit,
        ))
    else:
        txn.execute(sql, (room_id, from_token.stream, end_token.stream, False, limit))

    rows = self.cursor_to_dict(txn)

    rows.reverse()  # As we selected with reverse ordering

    if rows:
        # Tokens are positions between events.
        # This token points *after* the last event in the chunk.
        # We need it to point to the event before it in the chunk
        # since we are going backwards so we subtract one from the
        # stream part.
        topo = rows[0]["topological_ordering"]
        toke = rows[0]["stream_ordering"] - 1
        start_token = str(RoomStreamToken(topo, toke))

        token = (start_token, str(end_token))
    else:
        # No events in range: both ends of the chunk are `end_token`.
        token = (str(end_token), str(end_token))

    return rows, token
def get_room_events_stream_for_rooms(self, room_ids, from_key, to_key,
                                     limit=0, order='DESC'):
    """Get new room events in stream ordering since `from_key` for a set
    of rooms, fetched in parallel batches.

    Note: Python 2 era code (`xrange`, preserve_fn/
    preserve_context_over_deferred log-context helpers).

    Args:
        room_ids (iterable[str])
        from_key (str): stream token; no events before it are returned.
        to_key (str): stream token; no events after it are returned.
        limit (int): max events per room.
        order (str): "DESC" or "ASC"; which events win when limited.

    Returns (via defer.returnValue):
        dict mapping room_id to that room's (events, start token) result.
    """
    from_id = RoomStreamToken.parse_stream_token(from_key).stream

    # Narrow to rooms the stream cache says may have changed.
    room_ids = yield self._events_stream_cache.get_entities_changed(
        room_ids, from_id)

    if not room_ids:
        defer.returnValue({})

    results = {}
    room_ids = list(room_ids)
    # Query in batches of 20 rooms; presumably to cap concurrent DB work
    # — TODO confirm the rationale for the batch size.
    for rm_ids in (room_ids[i:i + 20] for i in xrange(0, len(room_ids), 20)):
        res = yield preserve_context_over_deferred(defer.gatherResults([
            preserve_fn(self.get_room_events_stream_for_room)(
                room_id, from_key, to_key, limit, order=order,
            )
            for room_id in rm_ids
        ]))
        results.update(dict(zip(rm_ids, res)))

    defer.returnValue(results)
def _have_rooms_changed(self, sync_result_builder):
    """Returns whether there may be any new events that should be sent down
    the sync. Returns True if there are.
    """
    user_id = sync_result_builder.sync_config.user.to_string()
    since_token = sync_result_builder.since_token
    now_token = sync_result_builder.now_token

    # This helper is only meaningful for incremental syncs.
    assert since_token

    # Get a list of membership change events that have happened.
    rooms_changed = yield self.store.get_membership_changes_for_user(
        user_id, since_token.room_key, now_token.room_key
    )

    if rooms_changed:
        # Membership changed somewhere: definitely something to send.
        defer.returnValue(True)

    # Application-service users track their rooms via AS room lists.
    app_service = self.store.get_app_service_by_user_id(user_id)
    if app_service:
        rooms = yield self.store.get_app_service_rooms(app_service)
        joined_room_ids = set(r.room_id for r in rooms)
    else:
        joined_room_ids = yield self.store.get_rooms_for_user(user_id)

    # Fall back to checking each joined room for stream activity since
    # the last sync.
    stream_id = RoomStreamToken.parse_stream_token(since_token.room_key).stream
    for room_id in joined_room_ids:
        if self.store.has_room_changed_since(room_id, stream_id):
            defer.returnValue(True)

    defer.returnValue(False)
async def get_room_events_stream_for_rooms(
    self,
    room_ids: Iterable[str],
    from_key: str,
    to_key: str,
    limit: int = 0,
    order: str = "DESC",
) -> Dict[str, Tuple[List[EventBase], str]]:
    """Get new room events in stream ordering since `from_key`.

    Args:
        room_ids
        from_key: Token from which no events are returned before
        to_key: Token from which no events are returned after. (This
            is typically the current stream token)
        limit: Maximum number of events to return
        order: Either "DESC" or "ASC". Determines which events are
            returned when the result is limited. If "DESC" then the most
            recent `limit` events are returned, otherwise returns the
            oldest `limit` events.

    Returns:
        A map from room id to a tuple containing:
            - list of recent events in the room
            - stream ordering key for the start of the chunk of events
              returned.
    """
    from_id = RoomStreamToken.parse_stream_token(from_key).stream

    # Narrow to rooms the stream cache says may have changed.
    room_ids = self._events_stream_cache.get_entities_changed(room_ids, from_id)

    if not room_ids:
        return {}

    results = {}
    room_ids = list(room_ids)
    # Query in batches of 20 rooms; presumably to cap concurrent DB work
    # — TODO confirm the rationale for the batch size.
    for rm_ids in (room_ids[i : i + 20] for i in range(0, len(room_ids), 20)):
        res = await make_deferred_yieldable(
            defer.gatherResults(
                [
                    run_in_background(
                        self.get_room_events_stream_for_room,
                        room_id,
                        from_key,
                        to_key,
                        limit,
                        order=order,
                    )
                    for room_id in rm_ids
                ],
                consumeErrors=True,
            )
        )
        results.update(dict(zip(rm_ids, res)))

    return results
def get_new_events(
    self,
    user,
    from_key,
    limit,
    room_ids,
    is_guest,
):
    """Return new room events for `user` since `from_key`, up to `limit`.

    Returns (via defer.returnValue) a (events, end_key) pair where
    end_key is the token callers should resume from.
    """
    # We just ignore the key for now.
    to_key = yield self.get_current_key()

    from_token = RoomStreamToken.parse(from_key)
    if from_token.topological:
        # Stream tokens here should never carry a topological part; warn
        # and strip it down to the stream component.
        logger.warn("Stream has topological part!!!! %r", from_key)
        from_key = "s%s" % (from_token.stream,)

    app_service = self.store.get_app_service_by_user_id(
        user.to_string()
    )
    if app_service:
        # Application services get their own room stream.
        events, end_key = yield self.store.get_appservice_room_stream(
            service=app_service,
            from_key=from_key,
            to_key=to_key,
            limit=limit,
        )
    else:
        # Membership changes for the user plus new events in their rooms.
        room_events = yield self.store.get_membership_changes_for_user(
            user.to_string(), from_key, to_key
        )

        room_to_events = yield self.store.get_room_events_stream_for_rooms(
            room_ids=room_ids,
            from_key=from_key,
            to_key=to_key,
            limit=limit or 10,
            order='ASC',
        )

        events = list(room_events)
        events.extend(e for evs, _ in room_to_events.values() for e in evs)

        # Merge and sort by the (topological, stream) order key set on the
        # events' internal metadata.
        events.sort(key=lambda e: e.internal_metadata.order)

        if limit:
            events[:] = events[:limit]

        if events:
            end_key = events[-1].internal_metadata.after
        else:
            end_key = to_key

    defer.returnValue((events, end_key))
def get_messages(self, user_id=None, room_id=None, pagin_config=None,
                 feedback=False, as_client_event=True):
    """Get messages in a room.

    Args:
        user_id (str): The user requesting messages.
        room_id (str): The room they want messages from.
        pagin_config (synapse.api.streams.PaginationConfig): The pagination
            config rules to apply, if any.
        feedback (bool): True to get compressed feedback with the messages
        as_client_event (bool): True to get events in client-server format.
    Returns:
        dict: Pagination API results
    """
    # The requester must be in the room to paginate its history.
    yield self.auth.check_joined_room(room_id, user_id)

    data_source = self.hs.get_event_sources().sources["room"]

    if not pagin_config.from_token:
        # No token supplied: start from "now", paginating backwards.
        pagin_config.from_token = (
            yield self.hs.get_event_sources().get_current_token(
                direction='b'
            )
        )

    room_token = RoomStreamToken.parse(pagin_config.from_token.room_key)
    if room_token.topological is None:
        raise SynapseError(400, "Invalid token")

    # Backfill from federation if we may be missing history at this depth.
    yield self.hs.get_handlers().federation_handler.maybe_backfill(
        room_id, room_token.topological
    )

    user = UserID.from_string(user_id)

    events, next_key = yield data_source.get_pagination_rows(
        user, pagin_config.get_source_config("room"), room_id
    )

    next_token = pagin_config.from_token.copy_and_replace(
        "room_key", next_key
    )

    time_now = self.clock.time_msec()

    chunk = {
        "chunk": [
            serialize_event(e, time_now, as_client_event)
            for e in events
        ],
        "start": pagin_config.from_token.to_string(),
        "end": next_token.to_string(),
    }

    defer.returnValue(chunk)
def get_messages(self, user_id=None, room_id=None, pagin_config=None,
                 feedback=False, as_client_event=True):
    """Get messages in a room.

    Args:
        user_id (str): The user requesting messages.
        room_id (str): The room they want messages from.
        pagin_config (synapse.api.streams.PaginationConfig): The pagination
            config rules to apply, if any.
        feedback (bool): True to get compressed feedback with the messages
        as_client_event (bool): True to get events in client-server format.
    Returns:
        dict: Pagination API results
    """
    # The requester must be in the room to paginate its history.
    yield self.auth.check_joined_room(room_id, user_id)

    data_source = self.hs.get_event_sources().sources["room"]

    if not pagin_config.from_token:
        # No token supplied: start from "now", paginating backwards.
        pagin_config.from_token = (
            yield self.hs.get_event_sources().get_current_token(direction='b'))

    room_token = RoomStreamToken.parse(pagin_config.from_token.room_key)
    if room_token.topological is None:
        raise SynapseError(400, "Invalid token")

    # Backfill from federation if we may be missing history at this depth.
    yield self.hs.get_handlers().federation_handler.maybe_backfill(
        room_id, room_token.topological)

    user = UserID.from_string(user_id)

    events, next_key = yield data_source.get_pagination_rows(
        user, pagin_config.get_source_config("room"), room_id)

    next_token = pagin_config.from_token.copy_and_replace(
        "room_key", next_key)

    time_now = self.clock.time_msec()

    chunk = {
        "chunk": [serialize_event(e, time_now, as_client_event) for e in events],
        "start": pagin_config.from_token.to_string(),
        "end": next_token.to_string(),
    }

    defer.returnValue(chunk)
def get_new_events(
    self,
    user,
    from_key,
    limit,
    room_ids,
    is_guest,
):
    """Return new room events for `user` since `from_key`, up to `limit`.

    Returns (via defer.returnValue) a (events, end_key) pair where
    end_key is the token callers should resume from.
    """
    # We just ignore the key for now.
    to_key = yield self.get_current_key()

    from_token = RoomStreamToken.parse(from_key)
    if from_token.topological:
        # Stream tokens here should never carry a topological part; warn
        # and strip it down to the stream component.
        logger.warn("Stream has topological part!!!! %r", from_key)
        from_key = "s%s" % (from_token.stream,)

    app_service = yield self.store.get_app_service_by_user_id(
        user.to_string()
    )
    if app_service:
        # Application services get their own room stream.
        events, end_key = yield self.store.get_appservice_room_stream(
            service=app_service,
            from_key=from_key,
            to_key=to_key,
            limit=limit,
        )
    else:
        # Membership changes for the user plus new events in their rooms.
        room_events = yield self.store.get_membership_changes_for_user(
            user.to_string(), from_key, to_key
        )

        room_to_events = yield self.store.get_room_events_stream_for_rooms(
            room_ids=room_ids,
            from_key=from_key,
            to_key=to_key,
            limit=limit or 10,
            order='ASC',
        )

        events = list(room_events)
        events.extend(e for evs, _ in room_to_events.values() for e in evs)

        # Merge and sort by the (topological, stream) order key set on the
        # events' internal metadata.
        events.sort(key=lambda e: e.internal_metadata.order)

        if limit:
            events[:] = events[:limit]

        if events:
            end_key = events[-1].internal_metadata.after
        else:
            end_key = to_key

    defer.returnValue((events, end_key))
def _set_before_and_after(events, rows, topo_order=True):
    """Inserts ordering information to events' internal metadata from
    the DB rows.

    Args:
        events (list[FrozenEvent])
        rows (list[_EventDictReturn])
        topo_order (bool): Whether the events were ordered topologically
            or by stream ordering. If true then all rows should have a non
            null topological_ordering.
    """
    for ev, db_row in zip(events, rows):
        stream_pos = db_row.stream_ordering
        topo_pos = None
        if topo_order and db_row.topological_ordering:
            topo_pos = db_row.topological_ordering

        meta = ev.internal_metadata
        meta.before = str(RoomStreamToken(topo_pos, stream_pos - 1))
        meta.after = str(RoomStreamToken(topo_pos, stream_pos))
        # Sort key: a missing topological part sorts as 0.
        meta.order = (int(topo_pos) if topo_pos else 0, int(stream_pos))
def get_rooms_that_changed(self, room_ids, from_key):
    """Given a list of rooms and a token, return rooms where there may
    have been changes.

    Args:
        room_ids (list)
        from_key (str): The room_key portion of a StreamToken
    """
    stream_pos = RoomStreamToken.parse_stream_token(from_key).stream
    changed = set()
    for rid in room_ids:
        if self._events_stream_cache.has_entity_changed(rid, stream_pos):
            changed.add(rid)
    return changed
def get_new_events(
    self,
    user,
    from_key,
    limit,
    room_ids,
    is_guest,
    explicit_room_id=None,
):
    """Return new room events for `user` since `from_key`, up to `limit`.

    Returns (via defer.returnValue) a (events, end_key) pair where
    end_key is the token callers should resume from.
    """
    # We just ignore the key for now.
    to_key = yield self.get_current_key()

    from_token = RoomStreamToken.parse(from_key)
    if from_token.topological:
        # Stream tokens here should never carry a topological part; warn
        # and strip it down to the stream component.
        logger.warn("Stream has topological part!!!! %r", from_key)
        from_key = "s%s" % (from_token.stream,)

    app_service = self.store.get_app_service_by_user_id(
        user.to_string()
    )
    if app_service:
        # We no longer support AS users using /sync directly.
        # See https://github.com/matrix-org/matrix-doc/issues/1144
        raise NotImplementedError()
    else:
        # Membership changes for the user plus new events in their rooms.
        room_events = yield self.store.get_membership_changes_for_user(
            user.to_string(), from_key, to_key
        )

        room_to_events = yield self.store.get_room_events_stream_for_rooms(
            room_ids=room_ids,
            from_key=from_key,
            to_key=to_key,
            limit=limit or 10,
            order='ASC',
        )

        events = list(room_events)
        events.extend(e for evs, _ in room_to_events.values() for e in evs)

        # Merge and sort by the (topological, stream) order key set on the
        # events' internal metadata.
        events.sort(key=lambda e: e.internal_metadata.order)

        if limit:
            events[:] = events[:limit]

        if events:
            end_key = events[-1].internal_metadata.after
        else:
            end_key = to_key

    defer.returnValue((events, end_key))
def get_rooms_that_changed(self, room_ids, from_key):
    """Given a list of rooms and a token, return rooms where there may
    have been changes.

    Args:
        room_ids (list)
        from_key (str): The room_key portion of a StreamToken
    """
    stream_pos = RoomStreamToken.parse_stream_token(from_key).stream
    return {
        rid
        for rid in room_ids
        if self._events_stream_cache.has_entity_changed(rid, stream_pos)
    }
def paginate_room_events(self, room_id, from_key, to_key=None,
                         direction='b', limit=-1, event_filter=None):
    """Returns list of events before or after a given token.

    Args:
        room_id (str)
        from_key (str): The token used to stream from
        to_key (str|None): A token which if given limits the results to
            only those before
        direction(char): Either 'b' or 'f' to indicate whether we are
            paginating forwards or backwards from `from_key`.
        limit (int): The maximum number of events to return. Zero or less
            means no limit.
        event_filter (Filter|None): If provided filters the events to
            those that match the filter.

    Returns:
        tuple[list[dict], str]: Returns the results as a list of dicts and
        a token that points to the end of the result set. The dicts have
        the keys "event_id", "topological_ordering" and "stream_ordering".
    """
    # Parse the string tokens into RoomStreamTokens before hitting the DB.
    from_key = RoomStreamToken.parse(from_key)
    if to_key:
        to_key = RoomStreamToken.parse(to_key)

    rows, token = yield self.runInteraction(
        "paginate_room_events", self._paginate_room_events_txn,
        room_id, from_key, to_key, direction, limit, event_filter,
    )

    # Hydrate the returned event_ids into full events.
    events = yield self._get_events(
        [r.event_id for r in rows],
        get_prev_content=True
    )

    self._set_before_and_after(events, rows)

    defer.returnValue((events, token))
def get_user_ids_changed(self, user_id, from_token):
    """Get list of users that have had the devices updated, or have newly
    joined a room, that `user_id` may be interested in.

    Args:
        user_id (str)
        from_token (StreamToken)

    Returns (via defer.returnValue):
        set[str]: user IDs whose device lists may have changed, restricted
        to users who currently share a room with `user_id`.
    """
    rooms = yield self.store.get_rooms_for_user(user_id)
    room_ids = set(r.room_id for r in rooms)

    # First we check if any devices have changed
    changed = yield self.store.get_user_whose_devices_changed(
        from_token.device_list_key)

    # Then work out if any users have since joined
    rooms_changed = self.store.get_rooms_that_changed(
        room_ids, from_token.room_key)

    # Loop-invariant: parse the room key once, not per room.
    # NOTE(review): this passes the whole RoomStreamToken as
    # `stream_ordering`, not `.stream` — confirm what
    # get_forward_extremeties_for_room actually expects.
    stream_ordering = RoomStreamToken.parse_stream_token(
        from_token.room_key)

    possibly_changed = set(changed)
    for room_id in rooms_changed:
        # Fetch the state at `from_token` via the forward extremities at
        # that point; best-effort — fall back to empty prev state.
        try:
            event_ids = yield self.store.get_forward_extremeties_for_room(
                room_id, stream_ordering=stream_ordering)
            prev_state_ids = yield self.store.get_state_ids_for_events(
                event_ids)
        except Exception:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and GeneratorExit (breaking Deferred cancellation). Keep the
            # best-effort fallback, but only for ordinary errors.
            prev_state_ids = {}

        current_state_ids = yield self.state.get_current_state_ids(room_id)

        # If there has been any change in membership, include them in the
        # possibly changed list. We'll check if they are joined below,
        # and we're not toooo worried about spuriously adding users.
        for key, event_id in current_state_ids.iteritems():
            etype, state_key = key
            if etype == EventTypes.Member:
                prev_event_id = prev_state_ids.get(key, None)
                if not prev_event_id or prev_event_id != event_id:
                    possibly_changed.add(state_key)

    users_who_share_room = yield self.store.get_users_who_share_room_with_user(
        user_id)

    # Take the intersection of the users whose devices may have changed
    # and those that actually still share a room with the user
    defer.returnValue(users_who_share_room & possibly_changed)
def _get_unread_counts_by_pos_txn(self, txn, room_id, user_id, topological_ordering, stream_ordering): token = RoomStreamToken(topological_ordering, stream_ordering) # First get number of notifications. # We don't need to put a notif=1 clause as all rows always have # notif=1 sql = ("SELECT count(*)" " FROM event_push_actions ea" " WHERE" " user_id = ?" " AND room_id = ?" " AND %s") % (lower_bound( token, self.database_engine, inclusive=False), ) txn.execute(sql, (user_id, room_id)) row = txn.fetchone() notify_count = row[0] if row else 0 txn.execute( """ SELECT notif_count FROM event_push_summary WHERE room_id = ? AND user_id = ? AND stream_ordering > ? """, ( room_id, user_id, stream_ordering, )) rows = txn.fetchall() if rows: notify_count += rows[0][0] # Now get the number of highlights sql = ("SELECT count(*)" " FROM event_push_actions ea" " WHERE" " highlight = 1" " AND user_id = ?" " AND room_id = ?" " AND %s") % (lower_bound( token, self.database_engine, inclusive=False), ) txn.execute(sql, (user_id, room_id)) row = txn.fetchone() highlight_count = row[0] if row else 0 return { "notify_count": notify_count, "highlight_count": highlight_count, }
def _set_before_and_after(events: List[EventBase],
                          rows: List[_EventDictReturn],
                          topo_order: bool = True):
    """Inserts ordering information to events' internal metadata from
    the DB rows.

    Args:
        events
        rows
        topo_order: Whether the events were ordered topologically or by
            stream ordering. If true then all rows should have a non null
            topological_ordering.
    """
    for ev, db_row in zip(events, rows):
        stream_pos = db_row.stream_ordering
        topo_pos: Optional[int] = None
        if topo_order and db_row.topological_ordering:
            topo_pos = db_row.topological_ordering

        meta = ev.internal_metadata
        # Note: tokens stored as RoomStreamToken objects, not strings.
        meta.before = RoomStreamToken(topo_pos, stream_pos - 1)
        meta.after = RoomStreamToken(topo_pos, stream_pos)
        # Sort key: a missing topological part sorts as 0.
        meta.order = (int(topo_pos) if topo_pos else 0, int(stream_pos))
def get_rooms_that_changed(self, room_ids: Collection[str],
                           from_key: str) -> Set[str]:
    """Given a list of rooms and a token, return rooms where there may
    have been changes.

    Args:
        room_ids: the rooms to check
        from_key: The room_key portion of a StreamToken
    """
    stream_pos = RoomStreamToken.parse_stream_token(from_key).stream
    changed_rooms = set()
    for room_id in room_ids:
        # The stream cache may report false positives, never false
        # negatives, so this is a safe pre-filter.
        if self._events_stream_cache.has_entity_changed(room_id, stream_pos):
            changed_rooms.add(room_id)
    return changed_rooms
def test_query_user_exists_unknown_user(self):
    """A message from an unknown user should trigger a user query
    against the interested appservice."""
    user_id = "@someone:anywhere"

    # One appservice which claims to be interested in the user.
    interested_service = self._mkservice(is_interested=True)
    interested_service.is_interested_in_user.return_value = True
    self.mock_store.get_app_services.return_value = [interested_service]

    # The user is not known to the local store, and the AS confirms it
    # when queried.
    self.mock_store.get_user_by_id.return_value = make_awaitable(None)
    self.mock_as_api.query_user.return_value = make_awaitable(True)

    message = Mock(sender=user_id, type="m.room.message", room_id="!foo:bar")
    self.mock_store.get_new_events_for_appservice.side_effect = [
        make_awaitable((0, [message])),
    ]

    self.handler.notify_interested_services(RoomStreamToken(None, 0))

    self.mock_as_api.query_user.assert_called_once_with(
        interested_service, user_id
    )
def _get_unread_event_push_actions_by_room(txn):
    # Nested transaction function: relies on `room_id`, `last_read_event_id`,
    # `user_id` and `self` from the enclosing scope (not visible here).
    #
    # Finds the stream/topological position of the user's read marker
    # event, then counts push actions (notifications and highlights)
    # strictly after that position.
    sql = ("SELECT stream_ordering, topological_ordering"
           " FROM events"
           " WHERE room_id = ? AND event_id = ?")
    txn.execute(sql, (room_id, last_read_event_id))
    results = txn.fetchall()
    if len(results) == 0:
        # Read-marker event is unknown: report nothing unread.
        return {"notify_count": 0, "highlight_count": 0}

    stream_ordering = results[0][0]
    topological_ordering = results[0][1]
    token = RoomStreamToken(topological_ordering, stream_ordering)

    # First get number of notifications.
    # We don't need to put a notif=1 clause as all rows always have
    # notif=1
    sql = ("SELECT count(*)"
           " FROM event_push_actions ea"
           " WHERE"
           " user_id = ?"
           " AND room_id = ?"
           " AND %s") % (lower_bound(
               token, self.database_engine, inclusive=False),
           )
    txn.execute(sql, (user_id, room_id))
    row = txn.fetchone()
    notify_count = row[0] if row else 0

    # Now get the number of highlights
    sql = ("SELECT count(*)"
           " FROM event_push_actions ea"
           " WHERE"
           " highlight = 1"
           " AND user_id = ?"
           " AND room_id = ?"
           " AND %s") % (lower_bound(
               token, self.database_engine, inclusive=False),
           )
    txn.execute(sql, (user_id, room_id))
    row = txn.fetchone()
    highlight_count = row[0] if row else 0

    return {
        "notify_count": notify_count,
        "highlight_count": highlight_count,
    }
async def get_topological_token_for_event(
        self, event_id: str) -> RoomStreamToken:
    """The stream token for an event

    Args:
        event_id: The id of the event to look up a stream token for.
    Raises:
        StoreError if the event wasn't in the database.
    Returns:
        A `RoomStreamToken` topological token.
    """
    orderings = await self.db_pool.simple_select_one(
        table="events",
        keyvalues={"event_id": event_id},
        retcols=("stream_ordering", "topological_ordering"),
        desc="get_topological_token_for_event",
    )
    topo = orderings["topological_ordering"]
    stream = orderings["stream_ordering"]
    return RoomStreamToken(topo, stream)
def get_room_events_stream_for_rooms(self, room_ids, from_key, to_key,
                                     limit=0, order='DESC'):
    """Get new events since `from_key` for each of the given rooms.

    Args:
        room_ids (list[str])
        from_key (str): Token from which no events are returned before.
        to_key (str): Token from which no events are returned after.
        limit (int): Maximum number of events to return per room.
        order (str): Either "DESC" or "ASC".
    Returns:
        Deferred[dict[str, tuple]]: map from room_id to the result of
        `get_room_events_stream_for_room` for that room.
    """
    from_id = RoomStreamToken.parse_stream_token(from_key).stream

    # Drop rooms the stream cache says have not changed since from_id.
    room_ids = yield self._events_stream_cache.get_entities_changed(
        room_ids, from_id
    )

    if not room_ids:
        defer.returnValue({})

    results = {}
    room_ids = list(room_ids)
    # Fetch in batches of 20 rooms to bound concurrency.
    for rm_ids in (room_ids[i:i + 20] for i in xrange(0, len(room_ids), 20)):
        res = yield defer.gatherResults([
            preserve_fn(self.get_room_events_stream_for_room)(
                room_id, from_key, to_key, limit, order=order,
            )
            for room_id in rm_ids
            # Fix: consumeErrors=True so that if one room's fetch fails,
            # the failures of the remaining deferreds are consumed rather
            # than left as "Unhandled error in Deferred" noise (matches
            # the newer version of this method).
        ], consumeErrors=True)
        results.update(dict(zip(rm_ids, res)))

    defer.returnValue(results)
def get_room_events_stream_for_rooms(self, room_ids, from_key, to_key,
                                     limit=0, order='DESC'):
    """Fetch new events since `from_key` for each room, batched 20 rooms
    at a time, returning a map from room_id to that room's result."""
    from_id = RoomStreamToken.parse_stream_token(from_key).stream

    # Pre-filter via the stream cache: only rooms that may have changed.
    room_ids = yield self._events_stream_cache.get_entities_changed(
        room_ids, from_id
    )

    if not room_ids:
        defer.returnValue({})

    room_ids = list(room_ids)
    results = {}
    batch_size = 20
    for batch_start in range(0, len(room_ids), batch_size):
        batch = room_ids[batch_start:batch_start + batch_size]
        deferreds = [
            run_in_background(
                self.get_room_events_stream_for_room,
                room_id, from_key, to_key, limit, order=order,
            )
            for room_id in batch
        ]
        batch_results = yield make_deferred_yieldable(
            defer.gatherResults(deferreds, consumeErrors=True)
        )
        results.update(dict(zip(batch, batch_results)))

    defer.returnValue(results)
def get_messages(self, user_id=None, room_id=None, pagin_config=None,
                 as_client_event=True, is_guest=False):
    """Get messages in a room.

    Args:
        user_id (str): The user requesting messages.
        room_id (str): The room they want messages from.
        pagin_config (synapse.api.streams.PaginationConfig): The pagination
            config rules to apply, if any.
        as_client_event (bool): True to get events in client-server format.
        is_guest (bool): Whether the requesting user is a guest (as opposed
            to a fully registered user).
    Returns:
        dict: Pagination API results
    """
    data_source = self.hs.get_event_sources().sources["room"]

    if pagin_config.from_token:
        room_token = pagin_config.from_token.room_key
    else:
        # No token supplied: start paginating backwards from "now".
        pagin_config.from_token = (
            yield self.hs.get_event_sources().get_current_token(
                direction='b'
            )
        )
        room_token = pagin_config.from_token.room_key

    room_token = RoomStreamToken.parse(room_token)
    if room_token.topological is None:
        # This handler requires a topological token to paginate history.
        raise SynapseError(400, "Invalid token")

    pagin_config.from_token = pagin_config.from_token.copy_and_replace(
        "room_key", str(room_token)
    )

    source_config = pagin_config.get_source_config("room")

    if not is_guest:
        member_event = yield self.auth.check_user_was_in_room(room_id, user_id)
        if member_event.membership == Membership.LEAVE:
            # If they have left the room then clamp the token to be before
            # they left the room.
            # If they're a guest, we'll just 403 them if they're asking for
            # events they can't see.
            leave_token = yield self.store.get_topological_token_for_event(
                member_event.event_id
            )
            leave_token = RoomStreamToken.parse(leave_token)
            if leave_token.topological < room_token.topological:
                source_config.from_key = str(leave_token)

            if source_config.direction == "f":
                # Also clamp the far end when paginating forwards, so the
                # user cannot read past their leave event.
                if source_config.to_key is None:
                    source_config.to_key = str(leave_token)
                else:
                    to_token = RoomStreamToken.parse(source_config.to_key)
                    if leave_token.topological < to_token.topological:
                        source_config.to_key = str(leave_token)

    # Kick off a federation backfill in case we are missing history
    # before this point.
    yield self.hs.get_handlers().federation_handler.maybe_backfill(
        room_id, room_token.topological
    )

    user = UserID.from_string(user_id)

    events, next_key = yield data_source.get_pagination_rows(
        user, source_config, room_id
    )

    next_token = pagin_config.from_token.copy_and_replace(
        "room_key", next_key
    )

    if not events:
        defer.returnValue({
            "chunk": [],
            "start": pagin_config.from_token.to_string(),
            "end": next_token.to_string(),
        })

    # Strip out events the requester is not allowed to see.
    events = yield self._filter_events_for_client(user_id, events,
                                                  is_guest=is_guest)

    time_now = self.clock.time_msec()

    chunk = {
        "chunk": [
            serialize_event(e, time_now, as_client_event)
            for e in events
        ],
        "start": pagin_config.from_token.to_string(),
        "end": next_token.to_string(),
    }

    defer.returnValue(chunk)
def get_messages(self, requester, room_id=None, pagin_config=None,
                 as_client_event=True):
    """Get messages in a room.

    Args:
        requester (Requester): The user requesting messages.
        room_id (str): The room they want messages from.
        pagin_config (synapse.api.streams.PaginationConfig): The pagination
            config rules to apply, if any.
        as_client_event (bool): True to get events in client-server format.
    Returns:
        dict: Pagination API results
    """
    user_id = requester.user.to_string()
    data_source = self.hs.get_event_sources().sources["room"]

    if pagin_config.from_token:
        room_token = pagin_config.from_token.room_key
    else:
        # No token supplied: paginate backwards from the current position.
        pagin_config.from_token = (
            yield self.hs.get_event_sources().get_current_token(
                direction='b'
            )
        )
        room_token = pagin_config.from_token.room_key

    room_token = RoomStreamToken.parse(room_token)

    pagin_config.from_token = pagin_config.from_token.copy_and_replace(
        "room_key", str(room_token)
    )

    source_config = pagin_config.get_source_config("room")

    membership, member_event_id = yield self._check_in_room_or_world_readable(
        room_id, user_id
    )

    if source_config.direction == 'b':
        # if we're going backwards, we might need to backfill. This
        # requires that we have a topo token.
        if room_token.topological:
            max_topo = room_token.topological
        else:
            # Stream-only token: resolve it to a topological position.
            max_topo = yield self.store.get_max_topological_token_for_stream_and_room(
                room_id, room_token.stream
            )

        if membership == Membership.LEAVE:
            # If they have left the room then clamp the token to be before
            # they left the room, to save the effort of loading from the
            # database.
            leave_token = yield self.store.get_topological_token_for_event(
                member_event_id
            )
            leave_token = RoomStreamToken.parse(leave_token)
            if leave_token.topological < max_topo:
                source_config.from_key = str(leave_token)

        # Pull in any missing history from other servers before reading.
        yield self.hs.get_handlers().federation_handler.maybe_backfill(
            room_id, max_topo
        )

    events, next_key = yield data_source.get_pagination_rows(
        requester.user, source_config, room_id
    )

    next_token = pagin_config.from_token.copy_and_replace(
        "room_key", next_key
    )

    if not events:
        defer.returnValue({
            "chunk": [],
            "start": pagin_config.from_token.to_string(),
            "end": next_token.to_string(),
        })

    # Strip out events the requester may not see; treat users who never
    # joined as "peeking".
    events = yield self._filter_events_for_client(
        user_id,
        events,
        is_peeking=(member_event_id is None),
    )

    time_now = self.clock.time_msec()

    chunk = {
        "chunk": [
            serialize_event(e, time_now, as_client_event)
            for e in events
        ],
        "start": pagin_config.from_token.to_string(),
        "end": next_token.to_string(),
    }

    defer.returnValue(chunk)
def get_room_events_stream(
    self,
    user_id,
    from_key,
    to_key,
    limit=0,
    is_guest=False,
    room_ids=None
):
    """Fetch new events on the stream for a user: events in the rooms
    they can see, plus membership events about them (invites/leaves).

    Args:
        user_id (str)
        from_key (str): stream token; only events after this are returned.
        to_key (str): stream token; only events up to and including this
            are returned.
        limit (int): maximum number of events (capped at MAX_STREAM_SIZE).
        is_guest (bool): if True, only include rooms from `room_ids` whose
            history visibility is world_readable.
        room_ids (list[str]|None): rooms to consider for the guest path.
    Returns:
        Deferred[tuple[list, str]]: the events and the next stream key.
    """
    room_ids = room_ids or []
    room_ids = [r for r in room_ids]
    if is_guest:
        # Guests may only see rooms whose history is world readable.
        current_room_membership_sql = (
            "SELECT c.room_id FROM history_visibility AS h"
            " INNER JOIN current_state_events AS c"
            " ON h.event_id = c.event_id"
            " WHERE c.room_id IN (%s)"
            " AND h.history_visibility = 'world_readable'" % (
                ",".join(map(lambda _: "?", room_ids))
            )
        )
        current_room_membership_args = room_ids
    else:
        current_room_membership_sql = (
            "SELECT m.room_id FROM room_memberships as m "
            " INNER JOIN current_state_events as c"
            " ON m.event_id = c.event_id AND c.state_key = m.user_id"
            " WHERE m.user_id = ? AND m.membership = 'join'"
        )
        current_room_membership_args = [user_id]

    # We also want to get any membership events about that user, e.g.
    # invites or leave notifications.
    membership_sql = (
        "SELECT m.event_id FROM room_memberships as m "
        "INNER JOIN current_state_events as c ON m.event_id = c.event_id "
        "WHERE m.user_id = ? "
    )
    membership_args = [user_id]

    if limit:
        # Fix: clamp the requested limit to MAX_STREAM_SIZE. This
        # previously used `max`, which *raised* small limits up to the
        # cap instead of enforcing it.
        limit = min(limit, MAX_STREAM_SIZE)
    else:
        limit = MAX_STREAM_SIZE

    # From and to keys should be integers from ordering.
    from_id = RoomStreamToken.parse_stream_token(from_key)
    to_id = RoomStreamToken.parse_stream_token(to_key)

    if from_key == to_key:
        return defer.succeed(([], to_key))

    sql = (
        "SELECT e.event_id, e.stream_ordering FROM events AS e WHERE "
        "(e.outlier = ? AND (room_id IN (%(current)s)) OR "
        "(event_id IN (%(invites)s))) "
        "AND e.stream_ordering > ? AND e.stream_ordering <= ? "
        "ORDER BY stream_ordering ASC LIMIT %(limit)d "
    ) % {
        "current": current_room_membership_sql,
        "invites": membership_sql,
        "limit": limit
    }

    def f(txn):
        args = ([False] + current_room_membership_args + membership_args
                + [from_id.stream, to_id.stream])
        txn.execute(sql, args)

        rows = self.cursor_to_dict(txn)

        ret = self._get_events_txn(
            txn,
            [r["event_id"] for r in rows],
            get_prev_content=True
        )

        self._set_before_and_after(ret, rows)

        if rows:
            key = "s%d" % max(r["stream_ordering"] for r in rows)
        else:
            # Assume we didn't get anything because there was nothing to
            # get.
            key = to_key

        return ret, key

    return self.runInteraction("get_room_events_stream", f)
def get_recent_events_for_room(self, room_id, limit, end_token,
                               from_token=None):
    """Fetch the most recent events in a room up to `end_token`, together
    with (start, end) tokens bounding the returned chunk.

    Args:
        room_id (str)
        limit (int): maximum number of events to return.
        end_token (str): stream token; only events at or before this
            stream position are returned.
        from_token (str|None): if given, only events strictly after this
            stream position are returned.
    Returns:
        Deferred[tuple[list[dict], tuple[str, str]]]: event rows (oldest
        first) and the (start, end) tokens for the chunk.
    """
    # TODO (erikj): Handle compressed feedback

    end_token = RoomStreamToken.parse_stream_token(end_token)

    if from_token is None:
        sql = (
            "SELECT stream_ordering, topological_ordering, event_id"
            " FROM events"
            " WHERE room_id = ? AND stream_ordering <= ? AND outlier = ?"
            " ORDER BY topological_ordering DESC, stream_ordering DESC"
            " LIMIT ?"
        )
    else:
        from_token = RoomStreamToken.parse_stream_token(from_token)
        sql = (
            "SELECT stream_ordering, topological_ordering, event_id"
            " FROM events"
            " WHERE room_id = ? AND stream_ordering > ?"
            " AND stream_ordering <= ? AND outlier = ?"
            " ORDER BY topological_ordering DESC, stream_ordering DESC"
            " LIMIT ?"
        )

    def get_recent_events_for_room_txn(txn):
        if from_token is None:
            txn.execute(sql, (room_id, end_token.stream, False, limit,))
        else:
            txn.execute(sql, (
                room_id, from_token.stream, end_token.stream, False, limit
            ))

        rows = self.cursor_to_dict(txn)

        rows.reverse()  # As we selected with reverse ordering

        if rows:
            # Tokens are positions between events.
            # This token points *after* the last event in the chunk.
            # We need it to point to the event before it in the chunk
            # since we are going backwards so we subtract one from the
            # stream part.
            topo = rows[0]["topological_ordering"]
            toke = rows[0]["stream_ordering"] - 1
            start_token = str(RoomStreamToken(topo, toke))

            token = (start_token, str(end_token))
        else:
            # No events: the chunk collapses to the end token.
            token = (str(end_token), str(end_token))

        return rows, token

    rows, token = yield self.runInteraction(
        "get_recent_events_for_room", get_recent_events_for_room_txn
    )

    logger.debug("stream before")
    events = yield self._get_events(
        [r["event_id"] for r in rows],
        get_prev_content=True
    )
    logger.debug("stream after")

    self._set_before_and_after(events, rows)

    defer.returnValue((events, token))
def get_room_events_stream(self, user_id, from_key, to_key, room_id,
                           limit=0, with_feedback=False):
    """Fetch new events on the stream for a user: events in rooms they
    have joined, plus membership events about them (invites/leaves).

    Args:
        user_id (str)
        from_key (str): stream token; only events after this are returned.
        to_key (str): stream token; only events up to and including this
            are returned.
        room_id (str): unused in this implementation; kept for interface
            compatibility.
        limit (int): maximum number of events (capped at MAX_STREAM_SIZE).
        with_feedback (bool): unused.
            TODO (erikj): Handle compressed feedback
    Returns:
        Deferred[tuple[list, str]]: the events and the next stream key.
    """
    current_room_membership_sql = (
        "SELECT m.room_id FROM room_memberships as m "
        " INNER JOIN current_state_events as c"
        " ON m.event_id = c.event_id AND c.state_key = m.user_id"
        " WHERE m.user_id = ? AND m.membership = 'join'"
    )

    # We also want to get any membership events about that user, e.g.
    # invites or leave notifications.
    membership_sql = (
        "SELECT m.event_id FROM room_memberships as m "
        "INNER JOIN current_state_events as c ON m.event_id = c.event_id "
        "WHERE m.user_id = ? "
    )

    if limit:
        # Fix: clamp the requested limit to MAX_STREAM_SIZE. This
        # previously used `max`, which raised small limits up to the cap
        # instead of enforcing it.
        limit = min(limit, MAX_STREAM_SIZE)
    else:
        limit = MAX_STREAM_SIZE

    # From and to keys should be integers from ordering.
    from_id = RoomStreamToken.parse_stream_token(from_key)
    to_id = RoomStreamToken.parse_stream_token(to_key)

    if from_key == to_key:
        return defer.succeed(([], to_key))

    sql = (
        "SELECT e.event_id, e.stream_ordering FROM events AS e WHERE "
        "(e.outlier = ? AND (room_id IN (%(current)s)) OR "
        "(event_id IN (%(invites)s))) "
        "AND e.stream_ordering > ? AND e.stream_ordering <= ? "
        "ORDER BY stream_ordering ASC LIMIT %(limit)d "
    ) % {
        "current": current_room_membership_sql,
        "invites": membership_sql,
        "limit": limit
    }

    def f(txn):
        txn.execute(sql, (False, user_id, user_id,
                          from_id.stream, to_id.stream,))

        rows = self.cursor_to_dict(txn)

        ret = self._get_events_txn(
            txn,
            [r["event_id"] for r in rows],
            get_prev_content=True
        )

        self._set_before_and_after(ret, rows)

        if rows:
            key = "s%d" % max(r["stream_ordering"] for r in rows)
        else:
            # Assume we didn't get anything because there was nothing to
            # get.
            key = to_key

        return ret, key

    return self.runInteraction("get_room_events_stream", f)
def get_user_ids_changed(self, user_id, from_token):
    """Get list of users that have had the devices updated, or have newly
    joined a room, that `user_id` may be interested in.

    Args:
        user_id (str)
        from_token (StreamToken)
    Returns:
        Deferred[dict]: `{"changed": [...], "left": [...]}` — user ids
        whose device lists may have changed, and user ids we may no
        longer share a room with.
    """
    now_token = yield self.hs.get_event_sources().get_current_token()

    room_ids = yield self.store.get_rooms_for_user(user_id)

    # First we check if any devices have changed
    changed = yield self.store.get_user_whose_devices_changed(
        from_token.device_list_key
    )

    # Then work out if any users have since joined
    rooms_changed = self.store.get_rooms_that_changed(room_ids, from_token.room_key)

    member_events = yield self.store.get_membership_changes_for_user(
        user_id, from_token.room_key, now_token.room_key
    )
    rooms_changed.update(event.room_id for event in member_events)

    # The stream position we compare state against; invariant over rooms.
    stream_ordering = RoomStreamToken.parse_stream_token(
        from_token.room_key
    ).stream

    possibly_changed = set(changed)
    possibly_left = set()
    for room_id in rooms_changed:
        current_state_ids = yield self.store.get_current_state_ids(room_id)

        # The user may have left the room
        # TODO: Check if they actually did or if we were just invited.
        if room_id not in room_ids:
            for key, event_id in iteritems(current_state_ids):
                etype, state_key = key
                if etype != EventTypes.Member:
                    continue
                possibly_left.add(state_key)
            continue

        # Fetch the current state at the time.
        try:
            event_ids = yield self.store.get_forward_extremeties_for_room(
                room_id, stream_ordering=stream_ordering
            )
        except errors.StoreError:
            # we have purged the stream_ordering index since the stream
            # ordering: treat it the same as a new room
            event_ids = []

        # special-case for an empty prev state: include all members
        # in the changed list
        if not event_ids:
            for key, event_id in iteritems(current_state_ids):
                etype, state_key = key
                if etype != EventTypes.Member:
                    continue
                possibly_changed.add(state_key)
            continue

        current_member_id = current_state_ids.get((EventTypes.Member, user_id))
        if not current_member_id:
            # We have no membership event of our own in this room; skip it.
            continue

        # mapping from event_id -> state_dict
        prev_state_ids = yield self.store.get_state_ids_for_events(event_ids)

        # Check if we've joined the room? If so we just blindly add all the
        # users to the "possibly changed" users.
        for state_dict in itervalues(prev_state_ids):
            member_event = state_dict.get((EventTypes.Member, user_id), None)
            if not member_event or member_event != current_member_id:
                for key, event_id in iteritems(current_state_ids):
                    etype, state_key = key
                    if etype != EventTypes.Member:
                        continue
                    possibly_changed.add(state_key)
                break

        # If there has been any change in membership, include them in the
        # possibly changed list. We'll check if they are joined below,
        # and we're not toooo worried about spuriously adding users.
        for key, event_id in iteritems(current_state_ids):
            etype, state_key = key
            if etype != EventTypes.Member:
                continue

            # check if this member has changed since any of the extremities
            # at the stream_ordering, and add them to the list if so.
            for state_dict in itervalues(prev_state_ids):
                prev_event_id = state_dict.get(key, None)
                if not prev_event_id or prev_event_id != event_id:
                    if state_key != user_id:
                        possibly_changed.add(state_key)
                    break

    if possibly_changed or possibly_left:
        users_who_share_room = yield self.store.get_users_who_share_room_with_user(
            user_id
        )

        # Take the intersection of the users whose devices may have changed
        # and those that actually still share a room with the user
        possibly_joined = possibly_changed & users_who_share_room
        possibly_left = (possibly_changed | possibly_left) - users_who_share_room
    else:
        possibly_joined = []
        possibly_left = []

    defer.returnValue({
        "changed": list(possibly_joined),
        "left": list(possibly_left),
    })
def get_room_events_stream_for_room(self, room_id, from_key, to_key, limit=0,
                                    order='DESC'):
    """Get new room events in stream ordering since `from_key`.

    Args:
        room_id (str)
        from_key (str): Token from which no events are returned before
        to_key (str): Token from which no events are returned after. (This
            is typically the current stream token)
        limit (int): Maximum number of events to return
        order (str): Either "DESC" or "ASC". Determines which events are
            returned when the result is limited. If "DESC" then the most
            recent `limit` events are returned, otherwise returns the
            oldest `limit` events.

    Returns:
        Deferred[tuple[list[FrozenEvent], str]]: Returns the list of
        events (in ascending order) and the token from the start of
        the chunk of events returned.
    """
    if from_key == to_key:
        defer.returnValue(([], from_key))

    from_id = RoomStreamToken.parse_stream_token(from_key).stream
    to_id = RoomStreamToken.parse_stream_token(to_key).stream

    # Cheap cache check: skip the DB entirely if nothing in this room
    # has changed since from_id.
    has_changed = yield self._events_stream_cache.has_entity_changed(
        room_id, from_id
    )
    if not has_changed:
        defer.returnValue(([], from_key))

    def fetch_rows(txn):
        sql = (
            "SELECT event_id, stream_ordering FROM events WHERE"
            " room_id = ?"
            " AND not outlier"
            " AND stream_ordering > ? AND stream_ordering <= ?"
            " ORDER BY stream_ordering %s LIMIT ?"
        ) % (order,)
        txn.execute(sql, (room_id, from_id, to_id, limit))
        return [_EventDictReturn(row[0], None, row[1]) for row in txn]

    rows = yield self.runInteraction(
        "get_room_events_stream_for_room", fetch_rows
    )

    ret = yield self._get_events(
        [r.event_id for r in rows], get_prev_content=True
    )

    # NOTE(review): `from_id` is always an int here, so topo_order is
    # always False; kept as-is for behavioural parity.
    self._set_before_and_after(ret, rows, topo_order=from_id is None)

    if order.lower() == "desc":
        ret.reverse()

    if rows:
        key = "s%d" % min(r.stream_ordering for r in rows)
    else:
        # Assume we didn't get anything because there was nothing to
        # get.
        key = from_key

    defer.returnValue((ret, key))
def paginate_room_events(self, room_id, from_key, to_key=None,
                         direction='b', limit=-1, event_filter=None):
    """Paginate over events in a room between two tokens.

    Args:
        room_id (str)
        from_key (str): token to paginate from.
        to_key (str|None): token to stop at, if any.
        direction (str): 'b' to paginate backwards, anything else forwards.
        limit (int): maximum number of rows; <= 0 means unlimited.
        event_filter (Filter|None): filter to apply to the results.
    Returns:
        Deferred[tuple[list, str]]: the events and a token pointing at
        the end of the returned chunk.
    """
    # Tokens really represent positions between elements, but we use
    # the convention of pointing to the event before the gap. Hence
    # we have a bit of asymmetry when it comes to equalities.
    args = [False, room_id]
    if direction == 'b':
        order = "DESC"
        bounds = upper_bound(
            RoomStreamToken.parse(from_key), self.database_engine
        )
        if to_key:
            bounds = "%s AND %s" % (bounds, lower_bound(
                RoomStreamToken.parse(to_key), self.database_engine
            ))
    else:
        order = "ASC"
        bounds = lower_bound(
            RoomStreamToken.parse(from_key), self.database_engine
        )
        if to_key:
            bounds = "%s AND %s" % (bounds, upper_bound(
                RoomStreamToken.parse(to_key), self.database_engine
            ))

    filter_clause, filter_args = filter_to_clause(event_filter)

    if filter_clause:
        bounds += " AND " + filter_clause
        args.extend(filter_args)

    if int(limit) > 0:
        args.append(int(limit))
        limit_str = " LIMIT ?"
    else:
        limit_str = ""

    sql = (
        "SELECT * FROM events"
        " WHERE outlier = ? AND room_id = ? AND %(bounds)s"
        " ORDER BY topological_ordering %(order)s,"
        " stream_ordering %(order)s %(limit)s"
    ) % {
        "bounds": bounds,
        "order": order,
        "limit": limit_str
    }

    def f(txn):
        txn.execute(sql, args)

        rows = self.cursor_to_dict(txn)

        if rows:
            # Build the token for the end of this chunk from the last row.
            topo = rows[-1]["topological_ordering"]
            toke = rows[-1]["stream_ordering"]
            if direction == 'b':
                # Tokens are positions between events.
                # This token points *after* the last event in the chunk.
                # We need it to point to the event before it in the chunk
                # when we are going backwards so we subtract one from the
                # stream part.
                toke -= 1
            next_token = str(RoomStreamToken(topo, toke))
        else:
            # TODO (erikj): We should work out what to do here instead.
            next_token = to_key if to_key else from_key

        return rows, next_token,

    rows, token = yield self.runInteraction("paginate_room_events", f)

    events = yield self._get_events(
        [r["event_id"] for r in rows],
        get_prev_content=True
    )

    self._set_before_and_after(events, rows)

    defer.returnValue((events, token))
def get_messages(self, requester, room_id=None, pagin_config=None,
                 as_client_event=True, event_filter=None):
    """Get messages in a room.

    Args:
        requester (Requester): The user requesting messages.
        room_id (str): The room they want messages from.
        pagin_config (synapse.api.streams.PaginationConfig): The pagination
            config rules to apply, if any.
        as_client_event (bool): True to get events in client-server format.
        event_filter (Filter): Filter to apply to results or None
    Returns:
        dict: Pagination API results
    """
    user_id = requester.user.to_string()

    if pagin_config.from_token:
        room_token = pagin_config.from_token.room_key
    else:
        # No token supplied: paginate backwards from the room's current
        # position.
        pagin_config.from_token = (
            yield self.hs.get_event_sources().get_current_token_for_room(
                room_id=room_id
            )
        )
        room_token = pagin_config.from_token.room_key

    room_token = RoomStreamToken.parse(room_token)

    pagin_config.from_token = pagin_config.from_token.copy_and_replace(
        "room_key", str(room_token)
    )

    source_config = pagin_config.get_source_config("room")

    # Hold the read half of the pagination lock so history purges for
    # this room cannot run concurrently with us.
    with (yield self.pagination_lock.read(room_id)):
        membership, member_event_id = yield self.auth.check_in_room_or_world_readable(
            room_id, user_id
        )

        if source_config.direction == 'b':
            # if we're going backwards, we might need to backfill. This
            # requires that we have a topo token.
            if room_token.topological:
                max_topo = room_token.topological
            else:
                max_topo = yield self.store.get_max_topological_token(
                    room_id, room_token.stream
                )

            if membership == Membership.LEAVE:
                # If they have left the room then clamp the token to be before
                # they left the room, to save the effort of loading from the
                # database.
                leave_token = yield self.store.get_topological_token_for_event(
                    member_event_id
                )
                leave_token = RoomStreamToken.parse(leave_token)
                if leave_token.topological < max_topo:
                    source_config.from_key = str(leave_token)

            # Pull in any missing history from other servers before reading.
            yield self.hs.get_handlers().federation_handler.maybe_backfill(
                room_id, max_topo
            )

        events, next_key = yield self.store.paginate_room_events(
            room_id=room_id,
            from_key=source_config.from_key,
            to_key=source_config.to_key,
            direction=source_config.direction,
            limit=source_config.limit,
            event_filter=event_filter,
        )

        next_token = pagin_config.from_token.copy_and_replace(
            "room_key", next_key
        )

        if events:
            if event_filter:
                events = event_filter.filter(events)

            # Strip out events the requester may not see; treat users who
            # never joined as "peeking".
            events = yield filter_events_for_client(
                self.store,
                user_id,
                events,
                is_peeking=(member_event_id is None),
            )

        if not events:
            defer.returnValue({
                "chunk": [],
                "start": pagin_config.from_token.to_string(),
                "end": next_token.to_string(),
            })

        state = None
        if event_filter and event_filter.lazy_load_members():
            # TODO: remove redundant members

            # FIXME: we also care about invite targets etc.
            state_filter = StateFilter.from_types(
                (EventTypes.Member, event.sender)
                for event in events
            )

            state_ids = yield self.store.get_state_ids_for_event(
                events[0].event_id, state_filter=state_filter,
            )

            if state_ids:
                state = yield self.store.get_events(list(state_ids.values()))
                state = state.values()

        time_now = self.clock.time_msec()

        chunk = {
            "chunk": [
                serialize_event(e, time_now, as_client_event)
                for e in events
            ],
            "start": pagin_config.from_token.to_string(),
            "end": next_token.to_string(),
        }

        if state:
            chunk["state"] = [
                serialize_event(e, time_now, as_client_event)
                for e in state
            ]

    defer.returnValue(chunk)
def get_room_events_stream_for_room(self, room_id, from_key, to_key, limit=0,
                                    order='DESC'):
    """Get new room events in stream ordering since `from_key`.

    Args:
        room_id (str)
        from_key (str|None): token from which no events are returned
            before; if None the last `limit` events in the room are
            returned in topological order.
        to_key (str): token from which no events are returned after.
        limit (int): maximum number of events to return.
        order (str): "DESC" or "ASC"; which end of the range is kept when
            the result is limited.
    Returns:
        Deferred[tuple[list, str]]: the events and the start-of-chunk
        token.
    """
    # Note: If from_key is None then we return in topological order. This
    # is because in that case we're using this as a "get the last few messages
    # in a room" function, rather than "get new messages since last sync"
    if from_key is not None:
        from_id = RoomStreamToken.parse_stream_token(from_key).stream
    else:
        from_id = None
    to_id = RoomStreamToken.parse_stream_token(to_key).stream

    if from_key == to_key:
        defer.returnValue(([], from_key))

    if from_id:
        # Cheap cache check: skip the DB hit entirely if nothing in this
        # room changed since from_id.
        has_changed = yield self._events_stream_cache.has_entity_changed(
            room_id, from_id
        )

        if not has_changed:
            defer.returnValue(([], from_key))

    def f(txn):
        if from_id is not None:
            sql = (
                "SELECT event_id, stream_ordering FROM events WHERE"
                " room_id = ?"
                " AND not outlier"
                " AND stream_ordering > ? AND stream_ordering <= ?"
                " ORDER BY stream_ordering %s LIMIT ?"
            ) % (order,)
            txn.execute(sql, (room_id, from_id, to_id, limit))
        else:
            sql = (
                "SELECT event_id, stream_ordering FROM events WHERE"
                " room_id = ?"
                " AND not outlier"
                " AND stream_ordering <= ?"
                " ORDER BY topological_ordering %s, stream_ordering %s LIMIT ?"
            ) % (order, order,)
            txn.execute(sql, (room_id, to_id, limit))

        rows = self.cursor_to_dict(txn)

        return rows

    rows = yield self.runInteraction("get_room_events_stream_for_room", f)

    ret = yield self._get_events(
        [r["event_id"] for r in rows],
        get_prev_content=True
    )

    self._set_before_and_after(ret, rows, topo_order=from_id is None)

    if order.lower() == "desc":
        ret.reverse()

    if rows:
        key = "s%d" % min(r["stream_ordering"] for r in rows)
    else:
        # Assume we didn't get anything because there was nothing to
        # get.
        key = from_key

    defer.returnValue((ret, key))
def get_appservice_room_stream(self, service, from_key, to_key, limit=0):
    """Stream events relevant to an application service between two tokens.

    NB this lives here instead of appservice.py so we can reuse the
    'private' StreamToken class in this file.

    Args:
        service (ApplicationService): the AS to filter events for.
        from_key (str): stream token to start after (exclusive).
        to_key (str): stream token to end at (inclusive).
        limit (int): maximum number of events (capped at MAX_STREAM_SIZE).
    Returns:
        Deferred[tuple[list, str]]: the events and the next stream key.
    """
    if limit:
        # Fix: clamp the requested limit to MAX_STREAM_SIZE. This
        # previously used `max`, which raised small limits up to the cap
        # instead of enforcing it.
        limit = min(limit, MAX_STREAM_SIZE)
    else:
        limit = MAX_STREAM_SIZE

    # From and to keys should be integers from ordering.
    from_id = RoomStreamToken.parse_stream_token(from_key)
    to_id = RoomStreamToken.parse_stream_token(to_key)

    if from_key == to_key:
        defer.returnValue(([], to_key))
        return

    # select all the events between from/to with a sensible limit
    sql = (
        "SELECT e.event_id, e.room_id, e.type, s.state_key, "
        "e.stream_ordering FROM events AS e "
        "LEFT JOIN state_events as s ON "
        "e.event_id = s.event_id "
        "WHERE e.stream_ordering > ? AND e.stream_ordering <= ? "
        "ORDER BY stream_ordering ASC LIMIT %(limit)d "
    ) % {
        "limit": limit
    }

    def f(txn):
        # pull out all the events between the tokens
        txn.execute(sql, (from_id.stream, to_id.stream,))
        rows = self.cursor_to_dict(txn)

        # Logic:
        #  - We want ALL events which match the AS room_id regex
        #  - We want ALL events which match the rooms represented by the AS
        #    room_alias regex
        #  - We want ALL events for rooms that AS users have joined.
        # This is currently supported via get_app_service_rooms (which is
        # used for the Notifier listener rooms). We can't reasonably make a
        # SQL query for these room IDs, so we'll pull all the events between
        # from/to and filter in python.
        rooms_for_as = self._get_app_service_rooms_txn(txn, service)
        room_ids_for_as = [r.room_id for r in rooms_for_as]

        def app_service_interested(row):
            if row["room_id"] in room_ids_for_as:
                return True

            if row["type"] == EventTypes.Member:
                if service.is_interested_in_user(row.get("state_key")):
                    return True

            return False

        return [r for r in rows if app_service_interested(r)]

    rows = yield self.runInteraction("get_appservice_room_stream", f)

    ret = yield self._get_events(
        [r["event_id"] for r in rows],
        get_prev_content=True
    )

    self._set_before_and_after(ret, rows, topo_order=from_id is None)

    if rows:
        key = "s%d" % max(r["stream_ordering"] for r in rows)
    else:
        # Assume we didn't get anything because there was nothing to
        # get.
        key = to_key

    defer.returnValue((ret, key))