def _get_current_state_ids_txn(txn):
    """Transaction: fetch the full current-state map for `room_id`.

    Returns a dict of (event type, state_key) -> event_id. Key strings are
    interned and the event_id coerced to ascii to save memory, since these
    maps are held in a cache for many rooms at once.
    """
    txn.execute(
        """SELECT type, state_key, event_id FROM current_state_events WHERE room_id = ? """,
        (room_id,),
    )
    current_state = {}
    for etype, state_key, event_id in txn:
        current_state[(intern_string(etype), intern_string(state_key))] = to_ascii(
            event_id
        )
    return current_state
def _get_room_summary_txn(txn):
    """Transaction: build a membership -> MemberSummary map for `room_id`.

    Each summary carries the total count for that membership plus up to six
    "hero" (user_id, event_id) pairs used for room-name generation.
    """
    # first get counts.
    # We do this all in one transaction to keep the cache small.
    # FIXME: get rid of this when we have room_stats
    count_sql = """
        SELECT count(*), m.membership FROM room_memberships as m
        INNER JOIN current_state_events as c
        ON m.event_id = c.event_id
        AND m.room_id = c.room_id
        AND m.user_id = c.state_key
        WHERE c.type = 'm.room.member' AND c.room_id = ?
        GROUP BY m.membership
    """
    txn.execute(count_sql, (room_id,))

    summaries = {}
    for count, membership in txn:
        summaries.setdefault(to_ascii(membership), MemberSummary([], count))

    # we order by membership and then fairly arbitrarily by event_id so
    # heroes are consistent
    heroes_sql = """
        SELECT m.user_id, m.membership, m.event_id FROM room_memberships as m
        INNER JOIN current_state_events as c
        ON m.event_id = c.event_id
        AND m.room_id = c.room_id
        AND m.user_id = c.state_key
        WHERE c.type = 'm.room.member' AND c.room_id = ?
        ORDER BY
            CASE m.membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC,
            m.event_id ASC
        LIMIT ?
    """
    # 6 is 5 (number of heroes) plus 1, in case one of them is the calling user.
    txn.execute(heroes_sql, (room_id, Membership.JOIN, Membership.INVITE, 6))
    for user_id, membership, event_id in txn:
        # we will always have a summary for this membership type at this
        # point given the summary currently contains the counts.
        summaries[to_ascii(membership)].members.append(
            (to_ascii(user_id), to_ascii(event_id))
        )

    return summaries
def _get_room_summary_txn(txn):
    """Transaction: return membership -> MemberSummary for `room_id`.

    Summaries hold the per-membership count and up to six hero
    (user_id, event_id) pairs for display purposes.
    """
    # first get counts.
    # We do this all in one transaction to keep the cache small.
    # FIXME: get rid of this when we have room_stats
    sql = """
        SELECT count(*), m.membership FROM room_memberships as m
        INNER JOIN current_state_events as c
        ON m.event_id = c.event_id
        AND m.room_id = c.room_id
        AND m.user_id = c.state_key
        WHERE c.type = 'm.room.member' AND c.room_id = ?
        GROUP BY m.membership
    """
    txn.execute(sql, (room_id,))

    res = {}
    for count, membership in txn:
        # GROUP BY guarantees each membership appears once, so a plain
        # presence check is equivalent to setdefault here.
        key = to_ascii(membership)
        if key not in res:
            res[key] = MemberSummary([], count)

    # we order by membership and then fairly arbitrarily by event_id so
    # heroes are consistent
    sql = """
        SELECT m.user_id, m.membership, m.event_id FROM room_memberships as m
        INNER JOIN current_state_events as c
        ON m.event_id = c.event_id
        AND m.room_id = c.room_id
        AND m.user_id = c.state_key
        WHERE c.type = 'm.room.member' AND c.room_id = ?
        ORDER BY
            CASE m.membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC,
            m.event_id ASC
        LIMIT ?
    """
    # 6 is 5 (number of heroes) plus 1, in case one of them is the calling user.
    txn.execute(sql, (room_id, Membership.JOIN, Membership.INVITE, 6))
    for user_id, membership, event_id in txn:
        # we will always have a summary for this membership type at this
        # point given the summary currently contains the counts.
        members = res[to_ascii(membership)].members
        members.append((to_ascii(user_id), to_ascii(event_id)))

    return res
def _get_state_for_groups(self, groups, types=None):
    """Given list of groups returns dict of group -> list of state events
    with matching types. `types` is a list of `(type, state_key)`, where
    a `state_key` of None matches all state_keys. If `types` is None then
    all events are returned.

    Note: this is a Twisted inlineCallbacks-style generator and returns
    its result via defer.returnValue.
    """
    if types:
        types = frozenset(types)
    results = {}
    missing_groups = []
    if types is not None:
        for group in set(groups):
            state_dict_ids, _, got_all = self._get_some_state_from_cache(
                group, types
            )
            results[group] = state_dict_ids

            if not got_all:
                missing_groups.append(group)
    else:
        for group in set(groups):
            state_dict_ids, got_all = self._get_all_state_from_cache(
                group
            )

            results[group] = state_dict_ids

            if not got_all:
                missing_groups.append(group)

    if missing_groups:
        # Okay, so we have some missing_types, lets fetch them.
        cache_seq_num = self._state_group_cache.sequence

        group_to_state_dict = yield self._get_state_groups_from_groups(
            missing_groups, types
        )

        # Now we want to update the cache with all the things we fetched
        # from the database.
        # FIX: the original called dict.iteritems(), which does not exist on
        # Python 3; .items() behaves identically here on both major versions
        # (and matches the six-based sibling implementation in this file).
        for group, group_state_dict in group_to_state_dict.items():
            state_dict = results[group]
            state_dict.update(
                ((intern_string(k[0]), intern_string(k[1])), to_ascii(v))
                for k, v in group_state_dict.items()
            )

            self._state_group_cache.update(
                cache_seq_num,
                key=group,
                value=state_dict,
                full=(types is None),
                known_absent=types,
            )

    defer.returnValue(results)
def _get_current_state_ids_txn(txn):
    """Transaction: return the current state of `room_id` as a mapping of
    (type, state_key) -> event_id, with keys interned to reduce memory.
    """
    txn.execute(
        """SELECT type, state_key, event_id FROM current_state_events WHERE room_id = ? """,
        (room_id,),
    )
    return dict(
        ((intern_string(row[0]), intern_string(row[1])), to_ascii(row[2]))
        for row in txn
    )
def f(txn):
    """Transaction: return the user_ids of everyone joined to `room_id`."""
    sql = (
        "SELECT m.user_id FROM room_memberships as m"
        " INNER JOIN current_state_events as c"
        " ON m.event_id = c.event_id "
        " AND m.room_id = c.room_id "
        " AND m.user_id = c.state_key"
        " WHERE c.type = 'm.room.member' AND c.room_id = ? AND m.membership = ?"
    )

    txn.execute(sql, (room_id, Membership.JOIN,))
    joined = []
    for row in txn:
        joined.append(to_ascii(row[0]))
    return joined
def f(txn):
    """Transaction: list user_ids currently joined to `room_id`."""
    sql = (
        "SELECT m.user_id FROM room_memberships as m"
        " INNER JOIN current_state_events as c"
        " ON m.event_id = c.event_id "
        " AND m.room_id = c.room_id "
        " AND m.user_id = c.state_key"
        " WHERE c.type = 'm.room.member' AND c.room_id = ? AND m.membership = ?"
    )

    params = (room_id, Membership.JOIN)
    txn.execute(sql, params)
    result = []
    for row in txn:
        result.append(to_ascii(row[0]))
    return result
def _get_state_for_groups(self, groups, types=None):
    """Given list of groups returns dict of group -> list of state events
    with matching types. `types` is a list of `(type, state_key)`, where
    a `state_key` of None matches all state_keys. If `types` is None then
    all events are returned.
    """
    if types:
        types = frozenset(types)

    results = {}
    groups_to_fetch = []

    # First satisfy as much as possible from the in-memory cache; any group
    # for which the cache is incomplete gets queued for a DB fetch.
    unique_groups = set(groups)
    if types is not None:
        for group in unique_groups:
            cached, _, got_all = self._get_some_state_from_cache(group, types)
            results[group] = cached
            if not got_all:
                groups_to_fetch.append(group)
    else:
        for group in unique_groups:
            cached, got_all = self._get_all_state_from_cache(group)
            results[group] = cached
            if not got_all:
                groups_to_fetch.append(group)

    if groups_to_fetch:
        # Okay, so we have some missing_types, lets fetch them.
        cache_seq_num = self._state_group_cache.sequence

        group_to_state_dict = yield self._get_state_groups_from_groups(
            groups_to_fetch, types)

        # Now we want to update the cache with all the things we fetched
        # from the database.
        for group, fetched in iteritems(group_to_state_dict):
            state_dict = results[group]
            interned = (
                ((intern_string(k[0]), intern_string(k[1])), to_ascii(v))
                for k, v in iteritems(fetched)
            )
            state_dict.update(interned)

            self._state_group_cache.update(
                cache_seq_num,
                key=group,
                value=state_dict,
                full=(types is None),
                known_absent=types,
            )

    defer.returnValue(results)
def wrapped(*args, **kwargs):
    """Cache-aware wrapper around the decorated function.

    Looks the call up in `cache` by a key derived from the arguments;
    on a miss, invokes the wrapped function, stores the (observable)
    deferred result, and arranges for failed calls to be evicted.
    NOTE(review): `self`, `obj`, `cache` and `get_cache_key` are closed
    over from the enclosing decorator scope, not visible here.
    """
    # If we're passed a cache_context then we'll want to call its invalidate()
    # whenever we are invalidated
    invalidate_callback = kwargs.pop("on_invalidate", None)

    cache_key = get_cache_key(args, kwargs)

    # Add our own `cache_context` to argument list if the wrapped function
    # has asked for one
    if self.add_cache_context:
        kwargs["cache_context"] = _CacheContext(cache, cache_key)

    try:
        # Cache hit path: observe an in-flight deferred, or use the
        # already-resolved value directly.
        cached_result_d = cache.get(cache_key, callback=invalidate_callback)

        if isinstance(cached_result_d, ObservableDeferred):
            observer = cached_result_d.observe()
        else:
            observer = cached_result_d

    except KeyError:
        # Cache miss: call through, preserving the Twisted logcontext.
        ret = defer.maybeDeferred(
            logcontext.preserve_fn(self.function_to_call),
            obj, *args, **kwargs)

        def onErr(f):
            # Don't cache failures — evict so the next call retries.
            cache.invalidate(cache_key)
            return f

        ret.addErrback(onErr)

        # If our cache_key is a string on py2, try to convert to ascii
        # to save a bit of space in large caches. Py3 does this
        # internally automatically.
        if six.PY2 and isinstance(cache_key, string_types):
            cache_key = to_ascii(cache_key)

        # consumeErrors=True: the cached deferred owns the failure; each
        # observer gets its own copy so nothing is reported unhandled.
        result_d = ObservableDeferred(ret, consumeErrors=True)
        cache.set(cache_key, result_d, callback=invalidate_callback)
        observer = result_d.observe()

    if isinstance(observer, defer.Deferred):
        return logcontext.make_deferred_yieldable(observer)
    else:
        return observer
def wrapped(*args, **kwargs):
    """Memoizing wrapper for the decorated function.

    Resolves the call against `cache` (keyed on the arguments); misses
    invoke the real function and cache an ObservableDeferred of the
    result, with failures evicted rather than cached.
    NOTE(review): `self`, `obj`, `cache` and `get_cache_key` come from
    the enclosing decorator scope, not shown in this block.
    """
    # If we're passed a cache_context then we'll want to call its invalidate()
    # whenever we are invalidated
    invalidate_callback = kwargs.pop("on_invalidate", None)

    cache_key = get_cache_key(args, kwargs)

    # Add our own `cache_context` to argument list if the wrapped function
    # has asked for one
    if self.add_cache_context:
        kwargs["cache_context"] = _CacheContext(cache, cache_key)

    try:
        # Hit: either observe the pending deferred or take the plain value.
        cached_result_d = cache.get(cache_key, callback=invalidate_callback)

        if isinstance(cached_result_d, ObservableDeferred):
            observer = cached_result_d.observe()
        else:
            observer = cached_result_d

    except KeyError:
        # Miss: call the real function inside the preserved logcontext.
        ret = defer.maybeDeferred(
            logcontext.preserve_fn(self.function_to_call), obj, *args, **kwargs
        )

        def onErr(f):
            # Failures are not cached — invalidate so callers can retry.
            cache.invalidate(cache_key)
            return f

        ret.addErrback(onErr)

        # If our cache_key is a string on py2, try to convert to ascii
        # to save a bit of space in large caches. Py3 does this
        # internally automatically.
        if six.PY2 and isinstance(cache_key, string_types):
            cache_key = to_ascii(cache_key)

        # consumeErrors=True lets the shared deferred absorb the failure
        # while each observer receives its own copy.
        result_d = ObservableDeferred(ret, consumeErrors=True)
        cache.set(cache_key, result_d, callback=invalidate_callback)
        observer = result_d.observe()

    if isinstance(observer, defer.Deferred):
        return logcontext.make_deferred_yieldable(observer)
    else:
        return observer
def get_users_in_room_txn(self, txn, room_id):
    """Transaction: return the user_ids of all joined members of `room_id`."""
    # If we can assume current_state_events.membership is up to date
    # then we can avoid a join, which is a Very Good Thing given how
    # frequently this function gets called.
    if self._current_state_events_membership_up_to_date:
        sql = """
            SELECT state_key FROM current_state_events
            WHERE type = 'm.room.member' AND room_id = ? AND membership = ?
        """
    else:
        sql = """
            SELECT state_key FROM room_memberships as m
            INNER JOIN current_state_events as c
            ON m.event_id = c.event_id
            AND m.room_id = c.room_id
            AND m.user_id = c.state_key
            WHERE c.type = 'm.room.member' AND c.room_id = ? AND m.membership = ?
        """

    txn.execute(sql, (room_id, Membership.JOIN))

    joined_user_ids = []
    for row in txn:
        joined_user_ids.append(to_ascii(row[0]))
    return joined_user_ids
def _get_joined_users_from_context(
    self, room_id, state_group, current_state_ids, cache_context, event=None,
    context=None,
):
    """Return a dict of user_id -> ProfileInfo for users joined to the room
    described by `current_state_ids`.

    Cached on (room_id, state_group); when `context` carries a delta from a
    previously-cached state group, the cached result is reused and patched
    rather than recomputed from scratch.
    NOTE(review): this is a generator (it yields a deferred), so it is
    presumably wrapped with @cachedInlineCallbacks by the decorator stack —
    confirm at the definition site.
    """
    # We don't use `state_group`, it's there so that we can cache based
    # on it. However, it's important that it's never None, since two current_states
    # with a state_group of None are likely to be different.
    # See bulk_get_push_rules_for_room for how we work around this.
    assert state_group is not None

    users_in_room = {}
    # All m.room.member event ids in the current state.
    member_event_ids = [
        e_id
        for key, e_id in iteritems(current_state_ids)
        if key[0] == EventTypes.Member
    ]

    if context is not None:
        # If we have a context with a delta from a previous state group,
        # check if we also have the result from the previous group in cache.
        # If we do then we can reuse that result and simply update it with
        # any membership changes in `delta_ids`
        if context.prev_group and context.delta_ids:
            prev_res = self._get_joined_users_from_context.cache.get(
                (room_id, context.prev_group), None)
            if prev_res and isinstance(prev_res, dict):
                users_in_room = dict(prev_res)
                # Only the changed member events need re-examining.
                member_event_ids = [
                    e_id
                    for key, e_id in iteritems(context.delta_ids)
                    if key[0] == EventTypes.Member
                ]
                # Drop every changed user first; joins get re-added below.
                for etype, state_key in context.delta_ids:
                    users_in_room.pop(state_key, None)

    # We check if we have any of the member event ids in the event cache
    # before we ask the DB
    # We don't update the event cache hit ratio as it completely throws off
    # the hit ratio counts. After all, we don't populate the cache if we
    # miss it here
    event_map = self._get_events_from_cache(member_event_ids, allow_rejected=False, update_metrics=False)

    missing_member_event_ids = []
    for event_id in member_event_ids:
        ev_entry = event_map.get(event_id)
        if ev_entry:
            if ev_entry.event.membership == Membership.JOIN:
                users_in_room[to_ascii(
                    ev_entry.event.state_key)] = ProfileInfo(
                        display_name=to_ascii(
                            ev_entry.event.content.get(
                                "displayname", None)),
                        avatar_url=to_ascii(
                            ev_entry.event.content.get("avatar_url", None)),
                )
        else:
            missing_member_event_ids.append(event_id)

    if missing_member_event_ids:
        # Fetch profiles for the member events we couldn't find in cache.
        event_to_memberships = yield self._get_joined_profiles_from_event_ids(
            missing_member_event_ids)
        # Falsy rows are non-join memberships; skip them.
        users_in_room.update(
            (row for row in event_to_memberships.values() if row))

    if event is not None and event.type == EventTypes.Member:
        if event.membership == Membership.JOIN:
            # The event being handled may itself be a join that is part of
            # the current state; make sure it is reflected in the result.
            if event.event_id in member_event_ids:
                users_in_room[to_ascii(event.state_key)] = ProfileInfo(
                    display_name=to_ascii(
                        event.content.get("displayname", None)),
                    avatar_url=to_ascii(
                        event.content.get("avatar_url", None)),
                )

    return users_in_room
def _get_room_summary_txn(txn):
    """Transaction: build a membership -> MemberSummary map for `room_id`,
    containing per-membership counts plus up to six "hero" members.

    Two SQL strategies: a cheap single-table query when the denormalised
    current_state_events.membership column is known to be up to date,
    otherwise a join against room_memberships.
    """
    # first get counts.
    # We do this all in one transaction to keep the cache small.
    # FIXME: get rid of this when we have room_stats

    # If we can assume current_state_events.membership is up to date
    # then we can avoid a join, which is a Very Good Thing given how
    # frequently this function gets called.
    if self._current_state_events_membership_up_to_date:
        # Note, rejected events will have a null membership field, so
        # we manually filter them out.
        sql = """
            SELECT count(*), membership FROM current_state_events
            WHERE type = 'm.room.member' AND room_id = ?
                AND membership IS NOT NULL
            GROUP BY membership
        """
    else:
        sql = """
            SELECT count(*), m.membership FROM room_memberships as m
            INNER JOIN current_state_events as c
            ON m.event_id = c.event_id
            AND m.room_id = c.room_id
            AND m.user_id = c.state_key
            WHERE c.type = 'm.room.member' AND c.room_id = ?
            GROUP BY m.membership
        """

    txn.execute(sql, (room_id, ))
    res = {}
    for count, membership in txn:
        summary = res.setdefault(to_ascii(membership), MemberSummary([], count))

    # we order by membership and then fairly arbitrarily by event_id so
    # heroes are consistent
    if self._current_state_events_membership_up_to_date:
        # Note, rejected events will have a null membership field, so
        # we manually filter them out.
        sql = """
            SELECT state_key, membership, event_id FROM current_state_events
            WHERE type = 'm.room.member' AND room_id = ?
                AND membership IS NOT NULL
            ORDER BY
                CASE membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC,
                event_id ASC
            LIMIT ?
        """
    else:
        sql = """
            SELECT c.state_key, m.membership, c.event_id
            FROM room_memberships as m
            INNER JOIN current_state_events as c USING (room_id, event_id)
            WHERE c.type = 'm.room.member' AND c.room_id = ?
            ORDER BY
                CASE m.membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC,
                c.event_id ASC
            LIMIT ?
        """

    # 6 is 5 (number of heroes) plus 1, in case one of them is the calling user.
    txn.execute(sql, (room_id, Membership.JOIN, Membership.INVITE, 6))
    for user_id, membership, event_id in txn:
        summary = res[to_ascii(membership)]
        # we will always have a summary for this membership type at this
        # point given the summary currently contains the counts.
        members = summary.members
        members.append((to_ascii(user_id), to_ascii(event_id)))

    return res
def _get_joined_users_from_context(
    self, room_id, state_group, current_state_ids, cache_context, event=None,
    context=None,
):
    """Return (via defer.returnValue) a dict of user_id -> ProfileInfo for
    users joined to the room described by `current_state_ids`.

    Cached on (room_id, state_group); a `context` carrying a delta from a
    previously-cached state group lets us patch that cached result instead
    of rebuilding from scratch.
    NOTE(review): generator body with defer.returnValue — presumably wrapped
    by @cachedInlineCallbacks; confirm at the definition site.
    """
    # We don't use `state_group`, it's there so that we can cache based
    # on it. However, it's important that it's never None, since two current_states
    # with a state_group of None are likely to be different.
    # See bulk_get_push_rules_for_room for how we work around this.
    assert state_group is not None

    users_in_room = {}
    # All m.room.member event ids in the current state.
    member_event_ids = [
        e_id
        for key, e_id in iteritems(current_state_ids)
        if key[0] == EventTypes.Member
    ]

    if context is not None:
        # If we have a context with a delta from a previous state group,
        # check if we also have the result from the previous group in cache.
        # If we do then we can reuse that result and simply update it with
        # any membership changes in `delta_ids`
        if context.prev_group and context.delta_ids:
            prev_res = self._get_joined_users_from_context.cache.get(
                (room_id, context.prev_group), None
            )
            if prev_res and isinstance(prev_res, dict):
                users_in_room = dict(prev_res)
                # Only the changed member events need re-examining.
                member_event_ids = [
                    e_id
                    for key, e_id in iteritems(context.delta_ids)
                    if key[0] == EventTypes.Member
                ]
                # Drop every changed user first; joins get re-added below.
                for etype, state_key in context.delta_ids:
                    users_in_room.pop(state_key, None)

    # We check if we have any of the member event ids in the event cache
    # before we ask the DB
    # We don't update the event cache hit ratio as it completely throws off
    # the hit ratio counts. After all, we don't populate the cache if we
    # miss it here
    event_map = self._get_events_from_cache(
        member_event_ids, allow_rejected=False, update_metrics=False
    )

    missing_member_event_ids = []
    for event_id in member_event_ids:
        ev_entry = event_map.get(event_id)
        if ev_entry:
            if ev_entry.event.membership == Membership.JOIN:
                users_in_room[to_ascii(ev_entry.event.state_key)] = ProfileInfo(
                    display_name=to_ascii(
                        ev_entry.event.content.get("displayname", None)
                    ),
                    avatar_url=to_ascii(
                        ev_entry.event.content.get("avatar_url", None)
                    ),
                )
        else:
            missing_member_event_ids.append(event_id)

    if missing_member_event_ids:
        # Fall back to the DB for member events not in the event cache;
        # only joined memberships are selected.
        rows = yield self._simple_select_many_batch(
            table="room_memberships",
            column="event_id",
            iterable=missing_member_event_ids,
            retcols=('user_id', 'display_name', 'avatar_url'),
            keyvalues={"membership": Membership.JOIN},
            batch_size=500,
            desc="_get_joined_users_from_context",
        )

        users_in_room.update(
            {
                to_ascii(row["user_id"]): ProfileInfo(
                    avatar_url=to_ascii(row["avatar_url"]),
                    display_name=to_ascii(row["display_name"]),
                )
                for row in rows
            }
        )

    if event is not None and event.type == EventTypes.Member:
        if event.membership == Membership.JOIN:
            # The event being handled may itself be a join that is part of
            # the current state; make sure it is reflected in the result.
            if event.event_id in member_event_ids:
                users_in_room[to_ascii(event.state_key)] = ProfileInfo(
                    display_name=to_ascii(event.content.get("displayname", None)),
                    avatar_url=to_ascii(event.content.get("avatar_url", None)),
                )

    defer.returnValue(users_in_room)
def _get_state_for_groups(self, groups, types=None):
    """Given list of groups returns dict of group -> list of state events
    with matching types. `types` is a list of `(type, state_key)`, where
    a `state_key` of None matches all state_keys. If `types` is None then
    all events are returned.

    Note: this is a Twisted inlineCallbacks-style generator and returns
    its result via defer.returnValue.
    """
    if types:
        types = frozenset(types)
    results = {}
    missing_groups = []
    if types is not None:
        for group in set(groups):
            state_dict_ids, missing_types, got_all = self._get_some_state_from_cache(
                group, types)
            results[group] = state_dict_ids

            if not got_all:
                missing_groups.append(group)
    else:
        for group in set(groups):
            state_dict_ids, got_all = self._get_all_state_from_cache(group)

            results[group] = state_dict_ids

            if not got_all:
                missing_groups.append(group)

    if missing_groups:
        # Okay, so we have some missing_types, lets fetch them.
        cache_seq_num = self._state_group_cache.sequence

        group_to_state_dict = yield self._get_state_groups_from_groups(
            missing_groups, types)

        # Now we want to update the cache with all the things we fetched
        # from the database.
        # FIX: the original called dict.iteritems(), which does not exist on
        # Python 3; .items() behaves identically here on both major versions.
        for group, group_state_dict in group_to_state_dict.items():
            if types:
                # We deliberately put key -> None mappings into the cache to
                # cache absence of the key, on the assumption that if we've
                # explicitly asked for some types then we will probably ask
                # for them again.
                state_dict = {
                    (intern_string(etype), intern_string(state_key)): None
                    for (etype, state_key) in types
                }
                state_dict.update(results[group])
                results[group] = state_dict
            else:
                state_dict = results[group]

            state_dict.update(
                ((intern_string(k[0]), intern_string(k[1])), to_ascii(v))
                for k, v in group_state_dict.items())

            self._state_group_cache.update(
                cache_seq_num,
                key=group,
                value=state_dict,
                full=(types is None),
            )

    # Remove all the entries with None values. The None values were just
    # used for bookkeeping in the cache.
    for group, state_dict in results.items():
        results[group] = {
            key: event_id
            for key, event_id in state_dict.items()
            if event_id
        }

    defer.returnValue(results)