def __init__(self, database: DatabasePool, db_conn, hs):
    super().__init__(database, db_conn, hs)

    # Originally the state store used a single DictionaryCache to cache the
    # event IDs for the state types in a given state group to avoid hammering
    # on the state_group* tables.
    #
    # The point of using a DictionaryCache is that it can cache a subset
    # of the state events for a given state group (i.e. a subset of the keys for a
    # given dict which is an entry in the cache for a given state group ID).
    #
    # However, this poses problems when performing complicated queries
    # on the store - for instance: "give me all the state for this group, but
    # limit members to this subset of users", as DictionaryCache's API isn't
    # rich enough to say "please cache any of these fields, apart from this subset".
    # This is problematic when lazy loading members, which requires this behaviour,
    # as without it the cache has no choice but to speculatively load all
    # state events for the group, which negates the efficiency being sought.
    #
    # Rather than overcomplicating DictionaryCache's API, we instead split the
    # state_group_cache into two halves - one for tracking non-member events,
    # and the other for tracking member events. This means that lazy loading
    # queries can be made in a cache-friendly manner by querying both caches
    # separately and then merging the result. So for the example above, you
    # would query the members cache for the specific subset of state keys
    # (which DictionaryCache handles efficiently) and the non-members cache
    # for all state (which DictionaryCache likewise handles fine), and then
    # just merge the results together.
    #
    # We size the non-members cache to be smaller than the members cache as the
    # vast majority of state in Matrix (today) is member events.
    self._state_group_cache = DictionaryCache(
        "*stateGroupCache*",
        # TODO: this hasn't been tuned yet
        50000,
    )
    self._state_group_members_cache = DictionaryCache(
        "*stateGroupMembersCache*",
        500000,
    )

    def get_max_state_group_txn(txn: Cursor):
        txn.execute("SELECT COALESCE(max(id), 0) FROM state_groups")
        return txn.fetchone()[0]

    self._state_group_seq_gen = build_sequence_generator(
        db_conn,
        self.database_engine,
        get_max_state_group_txn,
        "state_group_id_seq",
        table="state_groups",
        id_column="id",
    )
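The merge step described in the comment above can be illustrated with a minimal sketch. This is not Synapse's actual DictionaryCache API; the function and its plain-dict arguments are hypothetical stand-ins for hits on _state_group_cache (all non-member state) and _state_group_members_cache (only the requested member state keys).

def merge_lazy_loaded_state(non_member_state, member_state, wanted_members):
    """Combine a full non-member state dict with a filtered member subset.

    non_member_state: {(event_type, state_key): event_id} for non-member events
    member_state:     {(event_type, state_key): event_id} for m.room.member events
    wanted_members:   iterable of user IDs the client lazily requested
    """
    wanted = set(wanted_members)
    merged = dict(non_member_state)
    merged.update(
        {
            key: event_id
            for key, event_id in member_state.items()
            # the state_key of an m.room.member event is the user ID
            if key[1] in wanted
        }
    )
    return merged

Because the members cache is only ever asked for the exact subset of user IDs the client wants, each cache is queried in the access pattern it handles well, and the caller pays only the cost of a dict merge.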
def __init__(self, hs):
    self.hs = hs
    self._db_pool = hs.get_db_pool()
    self._clock = hs.get_clock()

    self._previous_txn_total_time = 0
    self._current_txn_total_time = 0
    self._previous_loop_ts = 0

    # TODO(paul): These can eventually be removed once the metrics code
    #   is running in mainline, and we have some nice monitoring frontends
    #   to watch it
    self._txn_perf_counters = PerformanceCounters()
    self._get_event_counters = PerformanceCounters()

    self._get_event_cache = Cache(
        "*getEvent*", keylen=3, lru=True, max_entries=hs.config.event_cache_size
    )

    self._state_group_cache = DictionaryCache(
        "*stateGroupCache*", 2000 * CACHE_SIZE_FACTOR
    )

    self._event_fetch_lock = threading.Condition()
    self._event_fetch_list = []
    self._event_fetch_ongoing = 0

    self._pending_ds = []

    self.database_engine = hs.database_engine
def __init__(self, hs):
    self.hs = hs
    self._db_pool = hs.get_db_pool()
    self._clock = hs.get_clock()

    self._previous_txn_total_time = 0
    self._current_txn_total_time = 0
    self._previous_loop_ts = 0

    # TODO(paul): These can eventually be removed once the metrics code
    #   is running in mainline, and we have some nice monitoring frontends
    #   to watch it
    self._txn_perf_counters = PerformanceCounters()
    self._get_event_counters = PerformanceCounters()

    self._get_event_cache = Cache(
        "*getEvent*", keylen=3, lru=True, max_entries=hs.config.event_cache_size
    )

    self._state_group_cache = DictionaryCache("*stateGroupCache*", 2000)

    self._event_fetch_lock = threading.Condition()
    self._event_fetch_list = []
    self._event_fetch_ongoing = 0

    self._pending_ds = []

    self.database_engine = hs.database_engine

    self._stream_id_gen = StreamIdGenerator("events", "stream_ordering")
    self._transaction_id_gen = IdGenerator("sent_transactions", "id", self)
    self._state_groups_id_gen = IdGenerator("state_groups", "id", self)
    self._access_tokens_id_gen = IdGenerator("access_tokens", "id", self)
    self._pushers_id_gen = IdGenerator("pushers", "id", self)
    self._push_rule_id_gen = IdGenerator("push_rules", "id", self)
    self._push_rules_enable_id_gen = IdGenerator("push_rules_enable", "id", self)
    self._receipts_id_gen = StreamIdGenerator("receipts_linearized", "stream_id")
def __init__(self, db_conn, hs):
    super(StateGroupWorkerStore, self).__init__(db_conn, hs)

    self._state_group_cache = DictionaryCache(
        "*stateGroupCache*", 500000 * get_cache_factor_for("stateGroupCache")
    )
def setUp(self):
    self.cache = DictionaryCache("foobar")
def __init__(self, db_conn, hs):
    super(StateGroupWorkerStore, self).__init__(db_conn, hs)

    self._state_group_cache = DictionaryCache(
        "*stateGroupCache*", 100000 * CACHE_SIZE_FACTOR
    )