Example #1
    def _invalidate_state_caches_and_stream(self, txn, room_id,
                                            members_changed):
        """Special case invalidation of caches based on current state.

        We special case this so that we can batch the cache invalidations into a
        single replication poke.

        Args:
            txn
            room_id (str): Room where state changed
            members_changed (iterable[str]): The user_ids of members that have changed
        """
        txn.call_after(self._invalidate_state_caches, room_id, members_changed)

        if members_changed:
            # We need to be careful that the size of the `members_changed` list
            # isn't so large that it causes problems sending over replication, so we
            # send them in chunks.
            # Max line length is 16K, and max user ID length is 255, so 50 should
            # be safe.
            for chunk in batch_iter(members_changed, 50):
                keys = itertools.chain([room_id], chunk)
                self._send_invalidation_to_replication(
                    txn, CURRENT_STATE_CACHE_NAME, keys)
        else:
            # if no members changed, we still need to invalidate the other caches.
            self._send_invalidation_to_replication(txn,
                                                   CURRENT_STATE_CACHE_NAME,
                                                   [room_id])
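
Every example on this page funnels its work through batch_iter, which is not shown here. A minimal sketch, assuming the helper simply groups an iterable into tuples of at most `size` items (consistent with Example #4, where a chunk is concatenated onto another tuple):

from itertools import islice

def batch_iter(iterable, size):
    """Yield successive tuples of at most `size` items from `iterable`.

    Sketch only, inferred from how the examples on this page use it.
    """
    iterator = iter(iterable)
    # iter(callable, sentinel): keep calling the lambda until it returns
    # the empty tuple, i.e. until the source iterator is exhausted.
    return iter(lambda: tuple(islice(iterator, size)), ())
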
Example #2
    def _update_presence_txn(self, txn, stream_orderings, presence_states):
        for stream_id, state in zip(stream_orderings, presence_states):
            txn.call_after(
                self.presence_stream_cache.entity_has_changed,
                state.user_id,
                stream_id,
            )
            txn.call_after(self._get_presence_for_user.invalidate,
                           (state.user_id, ))

        # Actually insert new rows
        self._simple_insert_many_txn(
            txn,
            table="presence_stream",
            values=[{
                "stream_id": stream_id,
                "user_id": state.user_id,
                "state": state.state,
                "last_active_ts": state.last_active_ts,
                "last_federation_update_ts": state.last_federation_update_ts,
                "last_user_sync_ts": state.last_user_sync_ts,
                "status_msg": state.status_msg,
                "currently_active": state.currently_active,
            } for state in presence_states],
        )

        # Delete old rows to stop database from getting really big
        sql = ("DELETE FROM presence_stream WHERE"
               " stream_id < ?"
               " AND user_id IN (%s)")

        for states in batch_iter(presence_states, 50):
            args = [stream_id]
            args.extend(s.user_id for s in states)
            txn.execute(sql % (",".join("?" for _ in states), ), args)
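
To make the string handling in Example #2 concrete: the %s substitution only expands the list of ? placeholders, while the values themselves are still passed as bound parameters. For a chunk of three (made-up) user IDs the statement comes out as:

states = ["@a:example.org", "@b:example.org", "@c:example.org"]  # made up
sql = ("DELETE FROM presence_stream WHERE"
       " stream_id < ?"
       " AND user_id IN (%s)")
print(sql % (",".join("?" for _ in states),))
# DELETE FROM presence_stream WHERE stream_id < ? AND user_id IN (?,?,?)
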
Example #3
    def _update_presence_txn(self, txn, stream_orderings, presence_states):
        for stream_id, state in zip(stream_orderings, presence_states):
            txn.call_after(self.presence_stream_cache.entity_has_changed,
                           state.user_id, stream_id)
            txn.call_after(self._get_presence_for_user.invalidate,
                           (state.user_id, ))

        # Actually insert new rows
        self.db.simple_insert_many_txn(
            txn,
            table="presence_stream",
            values=[{
                "stream_id": stream_id,
                "user_id": state.user_id,
                "state": state.state,
                "last_active_ts": state.last_active_ts,
                "last_federation_update_ts": state.last_federation_update_ts,
                "last_user_sync_ts": state.last_user_sync_ts,
                "status_msg": state.status_msg,
                "currently_active": state.currently_active,
            } for state in presence_states],
        )

        # Delete old rows to stop database from getting really big
        sql = "DELETE FROM presence_stream WHERE stream_id < ? AND "

        for states in batch_iter(presence_states, 50):
            clause, args = make_in_list_sql_clause(self.database_engine,
                                                   "user_id",
                                                   [s.user_id for s in states])
            txn.execute(sql + clause, [stream_id] + list(args))
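
Examples #3 and #5 delegate the IN clause to make_in_list_sql_clause instead of joining ? placeholders by hand. A minimal sketch of such a helper, assuming the engine object exposes a flag for native array parameters (the attribute name below is an assumption, not taken from the page):

def make_in_list_sql_clause(database_engine, column, iterable):
    """Return (clause, args) matching `column` against the given values.

    Sketch only: `supports_using_any_list` is an assumed capability flag.
    """
    values = list(iterable)
    if getattr(database_engine, "supports_using_any_list", False):
        # e.g. PostgreSQL: pass the whole list as one array parameter
        return "%s = ANY(?)" % (column,), [values]
    # Fallback: one placeholder per value, e.g. "user_id IN (?,?,?)"
    return "%s IN (%s)" % (column, ",".join("?" for _ in values)), values
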
Example #4
        def _get_users_whose_devices_changed_txn(txn):
            changes = set()

            sql = """
                SELECT DISTINCT user_id FROM device_lists_stream
                WHERE stream_id > ?
                AND user_id IN (%s)
            """

            for chunk in batch_iter(to_check, 100):
                txn.execute(sql % (",".join("?" for _ in chunk),), (from_key,) + chunk)
                changes.update(user_id for user_id, in txn)

            return changes
Example #5
        def _get_users_whose_devices_changed_txn(txn):
            changes = set()

            sql = """
                SELECT DISTINCT user_id FROM device_lists_stream
                WHERE stream_id > ?
                AND
            """

            for chunk in batch_iter(to_check, 100):
                clause, args = make_in_list_sql_clause(txn.database_engine,
                                                       "user_id", chunk)
                txn.execute(sql + clause, (from_key, ) + tuple(args))
                changes.update(user_id for user_id, in txn)

            return changes
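
In Examples #4 and #5, to_check and from_key are closed over from the enclosing method, and the inner function is handed to the database layer to run inside a transaction. A hedged sketch of that surrounding pattern (the method name and the runInteraction call site are illustrative):

    def get_users_whose_devices_changed(self, from_key, user_ids):
        # Illustrative wrapper: the inner function below is the one shown
        # in Example #5 and closes over `from_key` and `to_check`.
        to_check = list(user_ids)

        def _get_users_whose_devices_changed_txn(txn):
            ...  # body as in Example #5

        return self.db.runInteraction(
            "get_users_whose_devices_changed",
            _get_users_whose_devices_changed_txn,
        )
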
Example #6
    def _update_presence_txn(self, txn, stream_orderings, presence_states):
        for stream_id, state in zip(stream_orderings, presence_states):
            txn.call_after(
                self.presence_stream_cache.entity_has_changed,
                state.user_id, stream_id,
            )
            txn.call_after(
                self._get_presence_for_user.invalidate, (state.user_id,)
            )

        # Actually insert new rows
        self._simple_insert_many_txn(
            txn,
            table="presence_stream",
            values=[
                {
                    "stream_id": stream_id,
                    "user_id": state.user_id,
                    "state": state.state,
                    "last_active_ts": state.last_active_ts,
                    "last_federation_update_ts": state.last_federation_update_ts,
                    "last_user_sync_ts": state.last_user_sync_ts,
                    "status_msg": state.status_msg,
                    "currently_active": state.currently_active,
                }
                for state in presence_states
            ],
        )

        # Delete old rows to stop database from getting really big
        sql = (
            "DELETE FROM presence_stream WHERE"
            " stream_id < ?"
            " AND user_id IN (%s)"
        )

        for states in batch_iter(presence_states, 50):
            args = [stream_id]
            args.extend(s.user_id for s in states)
            txn.execute(
                sql % (",".join("?" for _ in states),),
                args
            )
Example #7
    def _fetch_event_rows(self, txn, event_ids):
        """Fetch event rows from the database

        Events which are not found are omitted from the result.

        The returned per-event dicts contain the following keys:

         * event_id (str)

         * json (str): json-encoded event structure

         * internal_metadata (str): json-encoded internal metadata dict

         * format_version (int|None): The format of the event. Hopefully one
           of EventFormatVersions. 'None' means the event predates
           EventFormatVersions (so the event is format V1).

         * rejected_reason (str|None): if the event was rejected, the reason
           why.

         * redactions (List[str]): a list of event-ids which (claim to) redact
           this event.

        Args:
            txn (twisted.enterprise.adbapi.Connection):
            event_ids (Iterable[str]): event IDs to fetch

        Returns:
            Dict[str, Dict]: a map from event id to event info.
        """
        event_dict = {}
        for evs in batch_iter(event_ids, 200):
            sql = ("SELECT "
                   " e.event_id, "
                   " e.internal_metadata,"
                   " e.json,"
                   " e.format_version, "
                   " rej.reason "
                   " FROM event_json as e"
                   " LEFT JOIN rejections as rej USING (event_id)"
                   " WHERE e.event_id IN (%s)") % (",".join(
                       ["?"] * len(evs)), )

            txn.execute(sql, evs)

            for row in txn:
                event_id = row[0]
                event_dict[event_id] = {
                    "event_id": event_id,
                    "internal_metadata": row[1],
                    "json": row[2],
                    "format_version": row[3],
                    "rejected_reason": row[4],
                    "redactions": [],
                }

            # check for redactions
            redactions_sql = (
                "SELECT event_id, redacts FROM redactions WHERE redacts IN (%s)"
            ) % (",".join(["?"] * len(evs)), )

            txn.execute(redactions_sql, evs)

            for (redacter, redacted) in txn:
                d = event_dict.get(redacted)
                if d:
                    d["redactions"].append(redacter)

        return event_dict
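
The chunking in Example #7 keeps each statement's parameter count bounded. The same pattern works in plain sqlite3; a self-contained sketch with an illustrative events(id, body) table:

import sqlite3
from itertools import islice

def fetch_rows_chunked(conn, ids, chunk_size=200):
    """Fetch rows for `ids` in bounded chunks, so no single statement
    binds an unbounded number of parameters (table name is illustrative)."""
    found = {}
    it = iter(ids)
    for chunk in iter(lambda: tuple(islice(it, chunk_size)), ()):
        placeholders = ",".join("?" for _ in chunk)
        sql = "SELECT id, body FROM events WHERE id IN (%s)" % placeholders
        for row_id, body in conn.execute(sql, chunk):
            found[row_id] = body
    return found
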
Example #8
 def _txn(txn):
     for batch in batch_iter(server_name_and_key_ids, 50):
         _get_keys(txn, batch)
     return keys
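
keys and _get_keys in Example #8 live in the enclosing scope rather than in the snippet itself. A hedged sketch of that shape (the accumulator, the SQL, and the runInteraction call are illustrative, not taken from the page):

    def get_server_verify_keys(self, server_name_and_key_ids):
        keys = {}  # filled in by _get_keys, returned from _txn

        def _get_keys(txn, batch):
            # Illustrative lookup: record any hit in the shared dict.
            for server_name, key_id in batch:
                txn.execute(
                    "SELECT verify_key FROM server_signature_keys"
                    " WHERE server_name = ? AND key_id = ?",
                    (server_name, key_id),
                )
                row = txn.fetchone()
                if row:
                    keys[(server_name, key_id)] = row[0]

        def _txn(txn):
            for batch in batch_iter(server_name_and_key_ids, 50):
                _get_keys(txn, batch)
            return keys

        return self.runInteraction("get_server_verify_keys", _txn)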