Example #1
    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        super().__init__(database, db_conn, hs)

        self.hs = hs

        self._device_list_id_gen = SlavedIdTracker(
            db_conn,
            "device_lists_stream",
            "stream_id",
            extra_tables=[
                ("user_signature_stream", "stream_id"),
                ("device_lists_outbound_pokes", "stream_id"),
            ],
        )
        device_list_max = self._device_list_id_gen.get_current_token()
        self._device_list_stream_cache = StreamChangeCache(
            "DeviceListStreamChangeCache", device_list_max)
        self._user_signature_stream_cache = StreamChangeCache(
            "UserSignatureStreamChangeCache", device_list_max)
        self._device_list_federation_stream_cache = StreamChangeCache(
            "DeviceListFederationStreamChangeCache", device_list_max)
Example #2
    def __init__(self, db_conn, hs):
        super(SlavedEventStore, self).__init__(db_conn, hs)
        self._stream_id_gen = SlavedIdTracker(
            db_conn,
            "events",
            "stream_ordering",
        )
        self._backfill_id_gen = SlavedIdTracker(db_conn,
                                                "events",
                                                "stream_ordering",
                                                step=-1)
        events_max = self._stream_id_gen.get_current_token()
        event_cache_prefill, min_event_val = self._get_cache_dict(
            db_conn,
            "events",
            entity_column="room_id",
            stream_column="stream_ordering",
            max_value=events_max,
        )
        self._events_stream_cache = StreamChangeCache(
            "EventsRoomStreamChangeCache",
            min_event_val,
            prefilled_cache=event_cache_prefill,
        )
        self._membership_stream_cache = StreamChangeCache(
            "MembershipStreamChangeCache",
            events_max,
        )

        self.stream_ordering_month_ago = 0
        self._stream_order_on_start = self.get_room_max_stream_ordering()
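
Example #2 also warms the cache at startup: judging by how its two return
values are used, _get_cache_dict evidently returns, for each entity, the
latest stream position among a bounded window of recent rows, together with
the lowest position that window covers. A rough, self-contained sketch of
such a helper over SQLite follows; it is an assumption based on this usage,
not Synapse's actual query, and toy_get_cache_dict is an illustrative name.

import sqlite3
from typing import Dict, Tuple


def toy_get_cache_dict(
    conn: sqlite3.Connection,
    table: str,
    entity_column: str,
    stream_column: str,
    max_value: int,
    limit: int = 100000,
) -> Tuple[Dict[str, int], int]:
    rows = conn.execute(
        f"SELECT {entity_column}, {stream_column} FROM {table} "
        f"WHERE {stream_column} <= ? "
        f"ORDER BY {stream_column} DESC LIMIT ?",
        (max_value, limit),
    ).fetchall()

    cache: Dict[str, int] = {}
    min_val = max_value
    for entity, pos in rows:
        # Rows arrive newest-first, so the first position seen for an
        # entity is its latest change.
        cache.setdefault(entity, pos)
        min_val = min(min_val, pos)
    return cache, min_val

The store then constructs StreamChangeCache(name, min_val,
prefilled_cache=cache): queries below the window's lower bound fall back to
the conservative "assume changed" path, as in the tests later on this page.
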
Example #3
    def __init__(self, database: DatabasePool, db_conn, hs):
        super(StreamWorkerStore, self).__init__(database, db_conn, hs)

        self._instance_name = hs.get_instance_name()
        self._send_federation = hs.should_send_federation()
        self._federation_shard_config = hs.config.worker.federation_shard_config

        # If we're a process that sends federation we may need to reset the
        # `federation_stream_position` table to match the current sharding
        # config. We don't do this now, as otherwise two processes could
        # conflict during startup, which would cause one to die.
        self._need_to_reset_federation_stream_positions = self._send_federation

        events_max = self.get_room_max_stream_ordering()
        event_cache_prefill, min_event_val = self.db_pool.get_cache_dict(
            db_conn,
            "events",
            entity_column="room_id",
            stream_column="stream_ordering",
            max_value=events_max,
        )
        self._events_stream_cache = StreamChangeCache(
            "EventsRoomStreamChangeCache",
            min_event_val,
            prefilled_cache=event_cache_prefill,
        )
        self._membership_stream_cache = StreamChangeCache(
            "MembershipStreamChangeCache", events_max
        )

        self._stream_order_on_start = self.get_room_max_stream_ordering()
Example #4
    def __init__(self, db_conn, hs):
        super(SlavedDeviceInboxStore, self).__init__(db_conn, hs)
        self._device_inbox_id_gen = SlavedIdTracker(
            db_conn,
            "device_max_stream_id",
            "stream_id",
        )
        self._device_inbox_stream_cache = StreamChangeCache(
            "DeviceInboxStreamChangeCache",
            self._device_inbox_id_gen.get_current_token())
        self._device_federation_outbox_stream_cache = StreamChangeCache(
            "DeviceFederationOutboxStreamChangeCache",
            self._device_inbox_id_gen.get_current_token())
Example #5
    def __init__(self, db_conn, hs):
        super(SlavedDeviceStore, self).__init__(db_conn, hs)

        self.hs = hs

        self._device_list_id_gen = SlavedIdTracker(db_conn,
                                                   "device_lists_stream",
                                                   "stream_id")
        device_list_max = self._device_list_id_gen.get_current_token()
        self._device_list_stream_cache = StreamChangeCache(
            "DeviceListStreamChangeCache", device_list_max)
        self._device_list_federation_stream_cache = StreamChangeCache(
            "DeviceListFederationStreamChangeCache", device_list_max)
Example #6
    def __init__(self, database: DatabasePool, db_conn, hs):
        account_max = self.get_max_account_data_stream_id()
        self._account_data_stream_cache = StreamChangeCache(
            "AccountDataAndTagsChangeCache", account_max
        )

        super().__init__(database, db_conn, hs)
Example #7
    def test_entity_has_changed_pops_off_start(self):
        """
        StreamChangeCache.entity_has_changed will respect the max size and
        purge the oldest items upon reaching that max size.
        """
        cache = StreamChangeCache("#test", 1, max_size=2)

        cache.entity_has_changed("*****@*****.**", 2)
        cache.entity_has_changed("*****@*****.**", 3)
        cache.entity_has_changed("*****@*****.**", 4)

        # The cache is at the max size, 2
        self.assertEqual(len(cache._cache), 2)

        # The oldest item has been popped off
        self.assertTrue("*****@*****.**" not in cache._entity_to_key)

        self.assertEqual(
            cache.get_all_entities_changed(2), ["*****@*****.**", "*****@*****.**"],
        )
        self.assertIsNone(cache.get_all_entities_changed(1))

        # If we update an existing entity, it keeps the two existing entities
        cache.entity_has_changed("*****@*****.**", 5)
        self.assertEqual(
            {"*****@*****.**", "*****@*****.**"}, set(cache._entity_to_key)
        )
        self.assertEqual(
            cache.get_all_entities_changed(2), ["*****@*****.**", "*****@*****.**"],
        )
        self.assertIsNone(cache.get_all_entities_changed(1))
Example #8
    def __init__(self, db_conn, hs):
        account_max = self.get_max_account_data_stream_id()
        self._account_data_stream_cache = StreamChangeCache(
            "AccountDataAndTagsChangeCache", account_max
        )

        super(AccountDataWorkerStore, self).__init__(db_conn, hs)
Example #9
    def __init__(self, database: DatabasePool, db_conn, hs):
        super().__init__(database, db_conn, hs)

        if hs.config.worker.worker_app is None:
            self._push_rules_stream_id_gen = StreamIdGenerator(
                db_conn, "push_rules_stream",
                "stream_id")  # type: Union[StreamIdGenerator, SlavedIdTracker]
        else:
            self._push_rules_stream_id_gen = SlavedIdTracker(
                db_conn, "push_rules_stream", "stream_id")

        push_rules_prefill, push_rules_id = self.db_pool.get_cache_dict(
            db_conn,
            "push_rules_stream",
            entity_column="user_id",
            stream_column="stream_id",
            max_value=self.get_max_push_rules_stream_id(),
        )

        self.push_rules_stream_cache = StreamChangeCache(
            "PushRulesStreamChangeCache",
            push_rules_id,
            prefilled_cache=push_rules_prefill,
        )

        self._users_new_default_push_rules = hs.config.users_new_default_push_rules
Example #10
    def __init__(self, hs):
        self.store = hs.get_datastore()
        self.server_name = hs.config.server_name
        self.auth = hs.get_auth()
        self.is_mine_id = hs.is_mine_id
        self.notifier = hs.get_notifier()
        self.state = hs.get_state_handler()

        self.hs = hs

        self.clock = hs.get_clock()
        self.wheel_timer = WheelTimer(bucket_size=5000)

        self.federation = hs.get_federation_sender()

        hs.get_federation_registry().register_edu_handler(
            "m.typing", self._recv_edu)

        hs.get_distributor().observe("user_left_room", self.user_left_room)

        self._member_typing_until = {}  # clock time we expect to stop
        self._member_last_federation_poke = {}

        self._latest_room_serial = 0
        self._reset()

        # caches which room_ids changed at which serials
        self._typing_stream_change_cache = StreamChangeCache(
            "TypingStreamChangeCache", self._latest_room_serial)

        self.clock.looping_call(self._handle_timeouts, 5000)
Example #11
    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        super().__init__(database, db_conn, hs)

        if hs.config.worker.worker_app is None:
            self._push_rules_stream_id_gen: AbstractStreamIdTracker = StreamIdGenerator(
                db_conn, "push_rules_stream", "stream_id")
        else:
            self._push_rules_stream_id_gen = SlavedIdTracker(
                db_conn, "push_rules_stream", "stream_id")

        push_rules_prefill, push_rules_id = self.db_pool.get_cache_dict(
            db_conn,
            "push_rules_stream",
            entity_column="user_id",
            stream_column="stream_id",
            max_value=self.get_max_push_rules_stream_id(),
        )

        self.push_rules_stream_cache = StreamChangeCache(
            "PushRulesStreamChangeCache",
            push_rules_id,
            prefilled_cache=push_rules_prefill,
        )
Example #12
    def test_prefilled_cache(self):
        """
        Providing a prefilled cache to StreamChangeCache results in a cache
        populated with the prefilled entries.
        """
        cache = StreamChangeCache("#test", 1, prefilled_cache={"*****@*****.**": 2})
        self.assertTrue(cache.has_entity_changed("*****@*****.**", 1))
Example #13
    def test_has_entity_changed(self):
        """
        StreamChangeCache.entity_has_changed will mark entities as changed, and
        has_entity_changed will observe the changed entities.
        """
        cache = StreamChangeCache("#test", 3)

        cache.entity_has_changed("*****@*****.**", 6)
        cache.entity_has_changed("*****@*****.**", 7)

        # If it's been changed after that stream position, return True
        self.assertTrue(cache.has_entity_changed("*****@*****.**", 4))
        self.assertTrue(cache.has_entity_changed("*****@*****.**", 4))

        # If it's been changed at that stream position, return False
        self.assertFalse(cache.has_entity_changed("*****@*****.**", 6))

        # If there are no changes after that stream position, return False
        self.assertFalse(cache.has_entity_changed("*****@*****.**", 7))

        # If the entity does not exist, return False.
        self.assertFalse(cache.has_entity_changed("*****@*****.**", 7))

        # If we request before the stream cache's earliest known position,
        # return True, whether it's a known entity or not.
        self.assertTrue(cache.has_entity_changed("*****@*****.**", 0))
        self.assertTrue(cache.has_entity_changed("*****@*****.**", 0))
Example #14
    def test_has_any_entity_changed(self):
        """
        StreamChangeCache.has_any_entity_changed will return True if any
        entities have been changed since the provided stream position, and
        False if they have not.  If the cache has entries and the provided
        stream position is before it, it will return True, otherwise False if
        the cache has no entries.
        """
        cache = StreamChangeCache("#test", 1)

        # With no entities, it returns False for the past, present, and future.
        self.assertFalse(cache.has_any_entity_changed(0))
        self.assertFalse(cache.has_any_entity_changed(1))
        self.assertFalse(cache.has_any_entity_changed(2))

        # We add an entity
        cache.entity_has_changed("*****@*****.**", 2)

        # With an entity, it returns True for the past and for the stream
        # start position, and False for the position the entity changed at
        # and any later ones.
        self.assertTrue(cache.has_any_entity_changed(0))
        self.assertTrue(cache.has_any_entity_changed(1))
        self.assertFalse(cache.has_any_entity_changed(2))
        self.assertFalse(cache.has_any_entity_changed(3))
Example #15
    def test_get_entities_changed(self):
        """
        StreamChangeCache.get_entities_changed will return the entities in the
        given list that have changed since the provided stream ID.  If the
        stream position is earlier than the earliest known position, it will
        return all of the entities queried for.
        """
        cache = StreamChangeCache("#test", 1)

        cache.entity_has_changed("*****@*****.**", 2)
        cache.entity_has_changed("*****@*****.**", 3)
        cache.entity_has_changed("*****@*****.**", 4)

        # Query all the entries, but mid-way through the stream. We should only
        # get the ones after that point.
        self.assertEqual(
            cache.get_entities_changed(
                ["*****@*****.**", "*****@*****.**", "*****@*****.**"], stream_pos=2
            ),
            set(["*****@*****.**", "*****@*****.**"]),
        )

        # Query all the entries mid-way through the stream, but include one
        # that doesn't exist in it. We should get back the one that doesn't
        # exist, too.
        self.assertEqual(
            cache.get_entities_changed(
                [
                    "*****@*****.**",
                    "*****@*****.**",
                    "*****@*****.**",
                    "*****@*****.**",
                ],
                stream_pos=2,
            ),
            set(["*****@*****.**", "*****@*****.**", "*****@*****.**"]),
        )

        # Query all the entries, but before the first known point. We will get
        # all the entries we queried for, including ones that don't exist.
        self.assertEqual(
            cache.get_entities_changed(
                [
                    "*****@*****.**",
                    "*****@*****.**",
                    "*****@*****.**",
                    "*****@*****.**",
                ],
                stream_pos=0,
            ),
            set(
                [
                    "*****@*****.**",
                    "*****@*****.**",
                    "*****@*****.**",
                    "*****@*****.**",
                ]
            ),
        )
Example #16
    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        self._instance_name = hs.get_instance_name()
        self._receipts_id_gen: AbstractStreamIdTracker

        if isinstance(database.engine, PostgresEngine):
            self._can_write_to_receipts = (
                self._instance_name in hs.config.worker.writers.receipts
            )

            self._receipts_id_gen = MultiWriterIdGenerator(
                db_conn=db_conn,
                db=database,
                stream_name="receipts",
                instance_name=self._instance_name,
                tables=[("receipts_linearized", "instance_name", "stream_id")],
                sequence_name="receipts_sequence",
                writers=hs.config.worker.writers.receipts,
            )
        else:
            self._can_write_to_receipts = True

            # We shouldn't be running in worker mode with SQLite, but it's
            # useful to support it for unit tests.
            #
            # If this process is the writer then we need to use
            # `StreamIdGenerator`, otherwise we use `SlavedIdTracker`, which
            # gets updated over replication. (Multiple writers are not
            # supported for SQLite.)
            if hs.get_instance_name() in hs.config.worker.writers.receipts:
                self._receipts_id_gen = StreamIdGenerator(
                    db_conn, "receipts_linearized", "stream_id"
                )
            else:
                self._receipts_id_gen = SlavedIdTracker(
                    db_conn, "receipts_linearized", "stream_id"
                )

        super().__init__(database, db_conn, hs)

        max_receipts_stream_id = self.get_max_receipt_stream_id()
        receipts_stream_prefill, min_receipts_stream_id = self.db_pool.get_cache_dict(
            db_conn,
            "receipts_linearized",
            entity_column="room_id",
            stream_column="stream_id",
            max_value=max_receipts_stream_id,
            limit=10000,
        )
        self._receipts_stream_cache = StreamChangeCache(
            "ReceiptsRoomChangeCache",
            min_receipts_stream_id,
            prefilled_cache=receipts_stream_prefill,
        )
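
Examples #16, #18 and #26 share the selection logic spelled out in their
comments: on Postgres every instance uses MultiWriterIdGenerator, since
multiple writers can coordinate through a shared sequence; on SQLite exactly
one process allocates IDs (StreamIdGenerator) and every other process merely
tracks the advancing position (SlavedIdTracker). A self-contained sketch of
that decision, using illustrative stand-in types rather than the real
Synapse classes:

from dataclasses import dataclass
from typing import List, Union


@dataclass
class Allocator:
    """Stands in for StreamIdGenerator / MultiWriterIdGenerator: a writer
    that can hand out new stream IDs."""

    multi_writer: bool = False


@dataclass
class Tracker:
    """Stands in for SlavedIdTracker: only follows the position that
    writers advance, as seen over replication."""


def choose_id_gen(
    engine: str, instance_name: str, writers: List[str]
) -> Union[Allocator, Tracker]:
    if engine == "postgres":
        # Postgres: every instance gets the multi-writer generator, which
        # coordinates allocations through a shared database sequence.
        return Allocator(multi_writer=True)
    # SQLite supports only a single writer; everyone else just tracks.
    if instance_name in writers:
        return Allocator()
    return Tracker()
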
Example #17
    def __init__(self, database: Database, db_conn, hs):
        super(SlavedPresenceStore, self).__init__(database, db_conn, hs)
        self._presence_id_gen = SlavedIdTracker(db_conn, "presence_stream", "stream_id")

        self._presence_on_startup = self._get_active_presence(db_conn)

        self.presence_stream_cache = StreamChangeCache(
            "PresenceStreamChangeCache", self._presence_id_gen.get_current_token()
        )
Example #18
    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        super().__init__(database, db_conn, hs)

        # `_can_write_to_account_data` indicates whether the current worker is allowed
        # to write account data. A value of `True` implies that `_account_data_id_gen`
        # is an `AbstractStreamIdGenerator` and not just a tracker.
        self._account_data_id_gen: AbstractStreamIdTracker

        if isinstance(database.engine, PostgresEngine):
            self._can_write_to_account_data = (
                self._instance_name in hs.config.worker.writers.account_data)

            self._account_data_id_gen = MultiWriterIdGenerator(
                db_conn=db_conn,
                db=database,
                stream_name="account_data",
                instance_name=self._instance_name,
                tables=[
                    ("room_account_data", "instance_name", "stream_id"),
                    ("room_tags_revisions", "instance_name", "stream_id"),
                    ("account_data", "instance_name", "stream_id"),
                ],
                sequence_name="account_data_sequence",
                writers=hs.config.worker.writers.account_data,
            )
        else:
            # We shouldn't be running in worker mode with SQLite, but it's
            # useful to support it for unit tests.
            #
            # If this process is the writer then we need to use
            # `StreamIdGenerator`, otherwise we use `SlavedIdTracker`, which
            # gets updated over replication. (Multiple writers are not
            # supported for SQLite.)
            if self._instance_name in hs.config.worker.writers.account_data:
                self._can_write_to_account_data = True
                self._account_data_id_gen = StreamIdGenerator(
                    db_conn,
                    "room_account_data",
                    "stream_id",
                    extra_tables=[("room_tags_revisions", "stream_id")],
                )
            else:
                self._account_data_id_gen = SlavedIdTracker(
                    db_conn,
                    "room_account_data",
                    "stream_id",
                    extra_tables=[("room_tags_revisions", "stream_id")],
                )

        account_max = self.get_max_account_data_stream_id()
        self._account_data_stream_cache = StreamChangeCache(
            "AccountDataAndTagsChangeCache", account_max)
Example #19
    def __init__(self, db_conn, hs):
        super(SlavedAccountDataStore, self).__init__(db_conn, hs)
        self._account_data_id_gen = SlavedIdTracker(
            db_conn, "account_data_max_stream_id", "stream_id",
        )
        self._account_data_stream_cache = StreamChangeCache(
            "AccountDataAndTagsChangeCache",
            self._account_data_id_gen.get_current_token(),
        )
Example #20
    def __init__(self, db_conn, hs):
        super(SlavedReceiptsStore, self).__init__(db_conn, hs)

        self._receipts_id_gen = SlavedIdTracker(db_conn, "receipts_linearized",
                                                "stream_id")

        self._receipts_stream_cache = StreamChangeCache(
            "ReceiptsRoomChangeCache",
            self._receipts_id_gen.get_current_token())
Example #21
    def test_get_all_entities_changed(self):
        """
        StreamChangeCache.get_all_entities_changed will return all changed
        entities since the given position.  If the position is before the start
        of the known stream, it returns None instead.
        """
        cache = StreamChangeCache("#test", 1)

        cache.entity_has_changed("*****@*****.**", 2)
        cache.entity_has_changed("*****@*****.**", 3)
        cache.entity_has_changed("*****@*****.**", 3)
        cache.entity_has_changed("*****@*****.**", 4)

        r = cache.get_all_entities_changed(1)

        # either of these orderings is valid
        ok1 = [
            "*****@*****.**",
            "*****@*****.**",
            "*****@*****.**",
            "*****@*****.**",
        ]
        ok2 = [
            "*****@*****.**",
            "*****@*****.**",
            "*****@*****.**",
            "*****@*****.**",
        ]
        self.assertTrue(r == ok1 or r == ok2)

        r = cache.get_all_entities_changed(2)
        self.assertTrue(r == ok1[1:] or r == ok2[1:])

        self.assertEqual(cache.get_all_entities_changed(3),
                         ["*****@*****.**"])
        self.assertEqual(cache.get_all_entities_changed(0), None)

        # ... later, things get more updates
        cache.entity_has_changed("*****@*****.**", 5)
        cache.entity_has_changed("*****@*****.**", 5)
        cache.entity_has_changed("*****@*****.**", 6)

        ok1 = [
            "*****@*****.**",
            "*****@*****.**",
            "*****@*****.**",
            "*****@*****.**",
        ]
        ok2 = [
            "*****@*****.**",
            "*****@*****.**",
            "*****@*****.**",
            "*****@*****.**",
        ]
        r = cache.get_all_entities_changed(3)
        self.assertTrue(r == ok1 or r == ok2)
Example #22
    def __init__(self, database: DatabasePool, db_conn, hs):
        super(SlavedDeviceInboxStore, self).__init__(database, db_conn, hs)
        self._device_inbox_id_gen = SlavedIdTracker(db_conn, "device_inbox",
                                                    "stream_id")
        self._device_inbox_stream_cache = StreamChangeCache(
            "DeviceInboxStreamChangeCache",
            self._device_inbox_id_gen.get_current_token(),
        )
        self._device_federation_outbox_stream_cache = StreamChangeCache(
            "DeviceFederationOutboxStreamChangeCache",
            self._device_inbox_id_gen.get_current_token(),
        )

        self._last_device_delete_cache = ExpiringCache(
            cache_name="last_device_delete_cache",
            clock=self._clock,
            max_len=10000,
            expiry_ms=30 * 60 * 1000,
        )
Example #23
File: stream.py Project: qu3stbaby/synapse
    def __init__(self, db_conn, hs):
        super(StreamWorkerStore, self).__init__(db_conn, hs)

        events_max = self.get_room_max_stream_ordering()
        event_cache_prefill, min_event_val = self._get_cache_dict(
            db_conn, "events",
            entity_column="room_id",
            stream_column="stream_ordering",
            max_value=events_max,
        )
        self._events_stream_cache = StreamChangeCache(
            "EventsRoomStreamChangeCache", min_event_val,
            prefilled_cache=event_cache_prefill,
        )
        self._membership_stream_cache = StreamChangeCache(
            "MembershipStreamChangeCache", events_max,
        )

        self._stream_order_on_start = self.get_room_max_stream_ordering()
Example #24
    def __init__(self, db_conn, hs):
        super(SlavedPushRuleStore, self).__init__(db_conn, hs)
        self._push_rules_stream_id_gen = SlavedIdTracker(
            db_conn,
            "push_rules_stream",
            "stream_id",
        )
        self.push_rules_stream_cache = StreamChangeCache(
            "PushRulesStreamChangeCache",
            self._push_rules_stream_id_gen.get_current_token(),
        )
Example #25
File: groups.py Project: vishnumg/synapse
    def __init__(self, database: Database, db_conn, hs):
        super(SlavedGroupServerStore, self).__init__(database, db_conn, hs)

        self.hs = hs

        self._group_updates_id_gen = SlavedIdTracker(
            db_conn, "local_group_updates", "stream_id"
        )
        self._group_updates_stream_cache = StreamChangeCache(
            "_group_updates_stream_cache",
            self._group_updates_id_gen.get_current_token(),
        )
Example #26
    def __init__(self, database: DatabasePool, db_conn, hs):
        self._instance_name = hs.get_instance_name()

        if isinstance(database.engine, PostgresEngine):
            self._can_write_to_account_data = (
                self._instance_name in hs.config.worker.writers.account_data
            )

            self._account_data_id_gen = MultiWriterIdGenerator(
                db_conn=db_conn,
                db=database,
                stream_name="account_data",
                instance_name=self._instance_name,
                tables=[
                    ("room_account_data", "instance_name", "stream_id"),
                    ("room_tags_revisions", "instance_name", "stream_id"),
                    ("account_data", "instance_name", "stream_id"),
                ],
                sequence_name="account_data_sequence",
                writers=hs.config.worker.writers.account_data,
            )
        else:
            self._can_write_to_account_data = True

            # We shouldn't be running in worker mode with SQLite, but it's
            # useful to support it for unit tests.
            #
            # If this process is the writer then we need to use
            # `StreamIdGenerator`, otherwise we use `SlavedIdTracker`, which
            # gets updated over replication. (Multiple writers are not
            # supported for SQLite.)
            if hs.get_instance_name() in hs.config.worker.writers.account_data:
                self._account_data_id_gen = StreamIdGenerator(
                    db_conn,
                    "room_account_data",
                    "stream_id",
                    extra_tables=[("room_tags_revisions", "stream_id")],
                )
            else:
                self._account_data_id_gen = SlavedIdTracker(
                    db_conn,
                    "room_account_data",
                    "stream_id",
                    extra_tables=[("room_tags_revisions", "stream_id")],
                )

        account_max = self.get_max_account_data_stream_id()
        self._account_data_stream_cache = StreamChangeCache(
            "AccountDataAndTagsChangeCache", account_max
        )

        super().__init__(database, db_conn, hs)
Example #27
File: push_rule.py Project: syamgk/synapse
    def __init__(self, db_conn, hs):
        super(PushRulesWorkerStore, self).__init__(db_conn, hs)

        push_rules_prefill, push_rules_id = self._get_cache_dict(
            db_conn,
            "push_rules_stream",
            entity_column="user_id",
            stream_column="stream_id",
            max_value=self.get_max_push_rules_stream_id(),
        )

        self.push_rules_stream_cache = StreamChangeCache(
            "PushRulesStreamChangeCache",
            push_rules_id,
            prefilled_cache=push_rules_prefill,
        )
Example #28
    def __init__(self, db_conn, hs):
        super(UserDirectorySlaveStore, self).__init__(db_conn, hs)

        events_max = self._stream_id_gen.get_current_token()
        curr_state_delta_prefill, min_curr_state_delta_id = self._get_cache_dict(
            db_conn,
            "current_state_delta_stream",
            entity_column="room_id",
            stream_column="stream_id",
            max_value=events_max,  # As we share the stream ID with the events token
            limit=1000,
        )
        self._curr_state_delta_stream_cache = StreamChangeCache(
            "_curr_state_delta_stream_cache",
            min_curr_state_delta_id,
            prefilled_cache=curr_state_delta_prefill,
        )
Example #29
    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        super().__init__(database, db_conn, hs)

        self.hs = hs

        self._group_updates_id_gen = SlavedIdTracker(db_conn,
                                                     "local_group_updates",
                                                     "stream_id")
        self._group_updates_stream_cache = StreamChangeCache(
            "_group_updates_stream_cache",
            self._group_updates_id_gen.get_current_token(),
        )
Example #30
File: events.py Project: lxndrbnsv/synapse
    def __init__(self, database: DatabasePool, db_conn, hs: "HomeServer"):
        super().__init__(database, db_conn, hs)

        events_max = self._stream_id_gen.get_current_token()
        curr_state_delta_prefill, min_curr_state_delta_id = self.db_pool.get_cache_dict(
            db_conn,
            "current_state_delta_stream",
            entity_column="room_id",
            stream_column="stream_id",
            max_value=events_max,  # As we share the stream ID with the events token
            limit=1000,
        )
        self._curr_state_delta_stream_cache = StreamChangeCache(
            "_curr_state_delta_stream_cache",
            min_curr_state_delta_id,
            prefilled_cache=curr_state_delta_prefill,
        )