def __init__(
    self,
    database: DatabasePool,
    db_conn: LoggingDatabaseConnection,
    hs: "HomeServer",
):
    """Load the registered application services and prepare the AS txn ID sequence.

    Parses every appservice registration file listed in the config up front so
    that later lookups are cheap, precomputes the exclusive-user regex from
    those registrations, and wires up the sequence generator used to allocate
    application service transaction IDs.
    """
    self.services_cache = load_appservices(
        hs.hostname, hs.config.appservice.app_service_config_files
    )
    self.exclusive_user_regex = _make_exclusive_regex(self.services_cache)

    def _current_max_as_txn_id(txn: Cursor) -> int:
        # SQLite has no native sequences; bootstrap by scanning the table
        # for the largest transaction ID seen so far.
        logger.warning(
            "Falling back to slow query, you should port to postgres")
        txn.execute(
            "SELECT COALESCE(max(txn_id), 0) FROM application_services_txns"
        )
        return cast(Tuple[int], txn.fetchone())[0]

    self._as_txn_seq_gen = build_sequence_generator(
        db_conn,
        database.engine,
        _current_max_as_txn_id,
        "application_services_txn_id_seq",
        table="application_services_txns",
        id_column="txn_id",
    )

    # Parent init runs last, matching the original construction order.
    super().__init__(database, db_conn, hs)
def __init__(self, database: DatabasePool, db_conn, hs):
    """Set up the split state-group caches and the state group ID sequence."""
    super().__init__(database, db_conn, hs)

    # Historically a single DictionaryCache held the event IDs for the state
    # types of a state group, to avoid hammering the state_group* tables.
    #
    # A DictionaryCache is attractive because it can hold just a subset of
    # the state events for a given state group (a subset of the keys of the
    # dict that is the cache entry for that state group ID).
    #
    # But that design breaks down for complicated queries such as "give me
    # all the state for this group, but limit members to this subset of
    # users": the DictionaryCache API cannot express "cache any of these
    # fields, except this subset". Lazy-loading members needs exactly that
    # behaviour; without it the cache must speculatively load every state
    # event in the group, defeating the point.
    #
    # Rather than complicate the DictionaryCache API, the cache is split in
    # two: one half tracks non-member events, the other tracks member
    # events. Lazy-loading queries then stay cache-friendly: query the
    # members cache for the specific subset of state keys (which
    # DictionaryCache handles efficiently), query the non-members cache for
    # all state (also handled fine), and merge the two results.
    #
    # The non-members cache is sized smaller than the members cache since
    # the vast majority of state in Matrix (today) is member events.
    self._state_group_cache = DictionaryCache(
        "*stateGroupCache*",
        # TODO: this hasn't been tuned yet
        50000,
    )
    self._state_group_members_cache = DictionaryCache(
        "*stateGroupMembersCache*",
        500000,
    )

    def _current_max_state_group_id(txn: Cursor):
        # Bootstrap value for the sequence generator on engines without
        # native sequences: the largest state group ID currently stored.
        txn.execute("SELECT COALESCE(max(id), 0) FROM state_groups")
        return txn.fetchone()[0]

    self._state_group_seq_gen = build_sequence_generator(
        db_conn,
        self.database_engine,
        _current_max_state_group_id,
        "state_group_id_seq",
        table="state_groups",
        id_column="id",
    )