Example #1
    def __init__(self, database: DatabasePool, db_conn, hs):
        super().__init__(database, db_conn, hs)

        if hs.config.worker.worker_app is None:
            self._push_rules_stream_id_gen = StreamIdGenerator(
                db_conn, "push_rules_stream", "stream_id"
            )  # type: Union[StreamIdGenerator, SlavedIdTracker]
        else:
            self._push_rules_stream_id_gen = SlavedIdTracker(
                db_conn, "push_rules_stream", "stream_id")

        push_rules_prefill, push_rules_id = self.db_pool.get_cache_dict(
            db_conn,
            "push_rules_stream",
            entity_column="user_id",
            stream_column="stream_id",
            max_value=self.get_max_push_rules_stream_id(),
        )

        self.push_rules_stream_cache = StreamChangeCache(
            "PushRulesStreamChangeCache",
            push_rules_id,
            prefilled_cache=push_rules_prefill,
        )

        self._users_new_default_push_rules = hs.config.users_new_default_push_rules
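
The pattern above is Synapse's usual master/worker split: the main process owns a StreamIdGenerator that can allocate new push-rules stream ids, while workers hold a SlavedIdTracker that merely follows the stream. A minimal sketch of the worker side, assuming Synapse's replication data-handler convention (a process_replication_rows hook and an advance(instance_name, token) method; exact signatures vary between versions):

    def process_replication_rows(self, stream_name, instance_name, token, rows):
        # Hypothetical override for illustration: advance the tracker so that
        # get_max_push_rules_stream_id() reflects the replicated position...
        if stream_name == "push_rules":
            self._push_rules_stream_id_gen.advance(instance_name, token)
            for row in rows:
                # ...and record which user changed at this position, so the
                # StreamChangeCache can answer "changed since X?" cheaply.
                self.push_rules_stream_cache.entity_has_changed(row.user_id, token)
        return super().process_replication_rows(stream_name, instance_name, token, rows)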
Example #2
    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        super().__init__(database, db_conn, hs)

        if hs.config.worker.worker_app is None:
            self._push_rules_stream_id_gen: AbstractStreamIdTracker = StreamIdGenerator(
                db_conn, "push_rules_stream", "stream_id")
        else:
            self._push_rules_stream_id_gen = SlavedIdTracker(
                db_conn, "push_rules_stream", "stream_id")

        push_rules_prefill, push_rules_id = self.db_pool.get_cache_dict(
            db_conn,
            "push_rules_stream",
            entity_column="user_id",
            stream_column="stream_id",
            max_value=self.get_max_push_rules_stream_id(),
        )

        self.push_rules_stream_cache = StreamChangeCache(
            "PushRulesStreamChangeCache",
            push_rules_id,
            prefilled_cache=push_rules_prefill,
        )
Example #3
    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        super().__init__(database, db_conn, hs)

        self.hs = hs

        self._device_list_id_gen = SlavedIdTracker(
            db_conn,
            "device_lists_stream",
            "stream_id",
            extra_tables=[
                ("user_signature_stream", "stream_id"),
                ("device_lists_outbound_pokes", "stream_id"),
            ],
        )
        device_list_max = self._device_list_id_gen.get_current_token()
        self._device_list_stream_cache = StreamChangeCache(
            "DeviceListStreamChangeCache", device_list_max)
        self._user_signature_stream_cache = StreamChangeCache(
            "UserSignatureStreamChangeCache", device_list_max)
        self._device_list_federation_stream_cache = StreamChangeCache(
            "DeviceListFederationStreamChangeCache", device_list_max)
Example #4
    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        self._instance_name = hs.get_instance_name()
        self._receipts_id_gen: AbstractStreamIdTracker

        if isinstance(database.engine, PostgresEngine):
            self._can_write_to_receipts = (
                self._instance_name in hs.config.worker.writers.receipts
            )

            self._receipts_id_gen = MultiWriterIdGenerator(
                db_conn=db_conn,
                db=database,
                stream_name="receipts",
                instance_name=self._instance_name,
                tables=[("receipts_linearized", "instance_name", "stream_id")],
                sequence_name="receipts_sequence",
                writers=hs.config.worker.writers.receipts,
            )
        else:
            self._can_write_to_receipts = True

            # We shouldn't be running in worker mode with SQLite, but it's
            # useful to support it for unit tests.
            #
            # If this process is the writer then we need to use
            # `StreamIdGenerator`, otherwise we use `SlavedIdTracker`, which
            # gets updated over replication. (Multiple writers are not
            # supported for SQLite.)
            if hs.get_instance_name() in hs.config.worker.writers.receipts:
                self._receipts_id_gen = StreamIdGenerator(
                    db_conn, "receipts_linearized", "stream_id"
                )
            else:
                self._receipts_id_gen = SlavedIdTracker(
                    db_conn, "receipts_linearized", "stream_id"
                )

        super().__init__(database, db_conn, hs)

        max_receipts_stream_id = self.get_max_receipt_stream_id()
        receipts_stream_prefill, min_receipts_stream_id = self.db_pool.get_cache_dict(
            db_conn,
            "receipts_linearized",
            entity_column="room_id",
            stream_column="stream_id",
            max_value=max_receipts_stream_id,
            limit=10000,
        )
        self._receipts_stream_cache = StreamChangeCache(
            "ReceiptsRoomChangeCache",
            min_receipts_stream_id,
            prefilled_cache=receipts_stream_prefill,
        )
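
On Postgres the MultiWriterIdGenerator lets every worker listed in hs.config.worker.writers.receipts allocate receipt stream ids concurrently; on SQLite only a single writer is possible, hence the fallback. A minimal write-path sketch, assuming the get_next() async-context-manager convention these generators share (_store_receipt is a hypothetical helper):

    async def insert_receipt_example(self, room_id, receipt):
        # Only a configured writer holds a generator that can allocate ids;
        # a bare SlavedIdTracker has no get_next().
        assert self._can_write_to_receipts
        async with self._receipts_id_gen.get_next() as stream_id:
            # The new id only becomes visible to readers once this context
            # manager exits successfully.
            await self._store_receipt(room_id, receipt, stream_id)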
Example #5
    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        super().__init__(database, db_conn, hs)

        # `_can_write_to_account_data` indicates whether the current worker is allowed
        # to write account data. A value of `True` implies that `_account_data_id_gen`
        # is an `AbstractStreamIdGenerator` and not just a tracker.
        self._account_data_id_gen: AbstractStreamIdTracker

        if isinstance(database.engine, PostgresEngine):
            self._can_write_to_account_data = (
                self._instance_name in hs.config.worker.writers.account_data)

            self._account_data_id_gen = MultiWriterIdGenerator(
                db_conn=db_conn,
                db=database,
                stream_name="account_data",
                instance_name=self._instance_name,
                tables=[
                    ("room_account_data", "instance_name", "stream_id"),
                    ("room_tags_revisions", "instance_name", "stream_id"),
                    ("account_data", "instance_name", "stream_id"),
                ],
                sequence_name="account_data_sequence",
                writers=hs.config.worker.writers.account_data,
            )
        else:
            # We shouldn't be running in worker mode with SQLite, but it's
            # useful to support it for unit tests.
            #
            # If this process is the writer then we need to use
            # `StreamIdGenerator`, otherwise we use `SlavedIdTracker`, which
            # gets updated over replication. (Multiple writers are not
            # supported for SQLite.)
            if self._instance_name in hs.config.worker.writers.account_data:
                self._can_write_to_account_data = True
                self._account_data_id_gen = StreamIdGenerator(
                    db_conn,
                    "room_account_data",
                    "stream_id",
                    extra_tables=[("room_tags_revisions", "stream_id")],
                )
            else:
                self._account_data_id_gen = SlavedIdTracker(
                    db_conn,
                    "room_account_data",
                    "stream_id",
                    extra_tables=[("room_tags_revisions", "stream_id")],
                )

        account_max = self.get_max_account_data_stream_id()
        self._account_data_stream_cache = StreamChangeCache(
            "AccountDataAndTagsChangeCache", account_max)
Example #6
    def __init__(self, database: Database, db_conn, hs):
        super(SlavedGroupServerStore, self).__init__(database, db_conn, hs)

        self.hs = hs

        self._group_updates_id_gen = SlavedIdTracker(
            db_conn, "local_group_updates", "stream_id"
        )
        self._group_updates_stream_cache = StreamChangeCache(
            "_group_updates_stream_cache",
            self._group_updates_id_gen.get_current_token(),
        )
Example #7
    def __init__(self, database: DatabasePool, db_conn, hs):
        self._account_data_id_gen = SlavedIdTracker(
            db_conn,
            "account_data",
            "stream_id",
            extra_tables=[
                ("room_account_data", "stream_id"),
                ("room_tags_revisions", "stream_id"),
            ],
        )

        super(SlavedAccountDataStore, self).__init__(database, db_conn, hs)
Example #8
    def __init__(self, database: DatabasePool, db_conn, hs):
        self._instance_name = hs.get_instance_name()

        if isinstance(database.engine, PostgresEngine):
            self._can_write_to_account_data = (
                self._instance_name in hs.config.worker.writers.account_data
            )

            self._account_data_id_gen = MultiWriterIdGenerator(
                db_conn=db_conn,
                db=database,
                stream_name="account_data",
                instance_name=self._instance_name,
                tables=[
                    ("room_account_data", "instance_name", "stream_id"),
                    ("room_tags_revisions", "instance_name", "stream_id"),
                    ("account_data", "instance_name", "stream_id"),
                ],
                sequence_name="account_data_sequence",
                writers=hs.config.worker.writers.account_data,
            )
        else:
            self._can_write_to_account_data = True

            # We shouldn't be running in worker mode with SQLite, but it's
            # useful to support it for unit tests.
            #
            # If this process is the writer then we need to use
            # `StreamIdGenerator`, otherwise we use `SlavedIdTracker`, which
            # gets updated over replication. (Multiple writers are not
            # supported for SQLite.)
            if hs.get_instance_name() in hs.config.worker.writers.account_data:
                self._account_data_id_gen = StreamIdGenerator(
                    db_conn,
                    "room_account_data",
                    "stream_id",
                    extra_tables=[("room_tags_revisions", "stream_id")],
                )
            else:
                self._account_data_id_gen = SlavedIdTracker(
                    db_conn,
                    "room_account_data",
                    "stream_id",
                    extra_tables=[("room_tags_revisions", "stream_id")],
                )

        account_max = self.get_max_account_data_stream_id()
        self._account_data_stream_cache = StreamChangeCache(
            "AccountDataAndTagsChangeCache", account_max
        )

        super().__init__(database, db_conn, hs)
Example #9
    def __init__(self, db_conn, hs):
        super(SlavedDeviceStore, self).__init__(db_conn, hs)

        self.hs = hs

        self._device_list_id_gen = SlavedIdTracker(
            db_conn, "device_lists_stream", "stream_id"
        )
        device_list_max = self._device_list_id_gen.get_current_token()
        self._device_list_stream_cache = StreamChangeCache(
            "DeviceListStreamChangeCache", device_list_max)
        self._device_list_federation_stream_cache = StreamChangeCache(
            "DeviceListFederationStreamChangeCache", device_list_max)
Example #10
    def __init__(self, database: DatabasePool, db_conn, hs):
        super(EventsWorkerStore, self).__init__(database, db_conn, hs)

        if hs.config.worker.writers.events == hs.get_instance_name():
            # We are the process in charge of generating stream ids for events,
            # so instantiate ID generators based on the database
            self._stream_id_gen = StreamIdGenerator(
                db_conn,
                "events",
                "stream_ordering",
            )
            self._backfill_id_gen = StreamIdGenerator(
                db_conn,
                "events",
                "stream_ordering",
                step=-1,
                extra_tables=[("ex_outlier_stream", "event_stream_ordering")],
            )
        else:
            # Another process is in charge of persisting events and generating
            # stream IDs: rely on the replication streams to let us know which
            # IDs we can process.
            self._stream_id_gen = SlavedIdTracker(
                db_conn, "events", "stream_ordering"
            )
            self._backfill_id_gen = SlavedIdTracker(
                db_conn, "events", "stream_ordering", step=-1
            )

        self._get_event_cache = Cache(
            "*getEvent*",
            keylen=3,
            max_entries=hs.config.caches.event_cache_size,
            apply_cache_factor_from_config=False,
        )

        self._event_fetch_lock = threading.Condition()
        self._event_fetch_list = []
        self._event_fetch_ongoing = 0
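
The step=-1 generator above makes the backfill stream count downwards, so events fetched from remote history receive negative stream orderings and sort before every live event. A hedged illustration of that convention, reusing the get_next() API (only meaningful on the event-writer process, where the generators can allocate ids):

    async def ordering_convention_example(self):
        async with self._stream_id_gen.get_next() as live_pos:
            pass  # live events get increasing positive orderings: N, N+1, ...
        async with self._backfill_id_gen.get_next() as backfill_pos:
            pass  # backfilled events count downwards: -M, -M-1, ...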
Example #11
    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        super().__init__(database, db_conn, hs)

        self.hs = hs

        self._group_updates_id_gen = SlavedIdTracker(
            db_conn, "local_group_updates", "stream_id"
        )
        self._group_updates_stream_cache = StreamChangeCache(
            "_group_updates_stream_cache",
            self._group_updates_id_gen.get_current_token(),
        )
Example #12
    def __init__(self, database: DatabasePool, db_conn, hs):
        super(SlavedDeviceInboxStore, self).__init__(database, db_conn, hs)
        self._device_inbox_id_gen = SlavedIdTracker(
            db_conn, "device_inbox", "stream_id"
        )
        self._device_inbox_stream_cache = StreamChangeCache(
            "DeviceInboxStreamChangeCache",
            self._device_inbox_id_gen.get_current_token(),
        )
        self._device_federation_outbox_stream_cache = StreamChangeCache(
            "DeviceFederationOutboxStreamChangeCache",
            self._device_inbox_id_gen.get_current_token(),
        )

        self._last_device_delete_cache = ExpiringCache(
            cache_name="last_device_delete_cache",
            clock=self._clock,
            max_len=10000,
            expiry_ms=30 * 60 * 1000,
        )
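
ExpiringCache behaves like a bounded mapping whose entries are dropped after expiry_ms (thirty minutes here) or once max_len entries accumulate. A hedged usage sketch; the (user_id, device_id) key layout and the helper names are assumptions for illustration:

    def remember_device_delete(self, user_id, device_id, stream_id):
        # Entries expire silently, so callers must treat a miss as "unknown",
        # not as "never deleted".
        self._last_device_delete_cache[(user_id, device_id)] = stream_id

    def last_device_delete(self, user_id, device_id):
        return self._last_device_delete_cache.get((user_id, device_id), None)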
Example #13
    def __init__(
        self,
        database: DatabasePool,
        db_conn: LoggingDatabaseConnection,
        hs: "HomeServer",
    ):
        self.hs = hs

        self._device_list_id_gen = SlavedIdTracker(
            db_conn,
            "device_lists_stream",
            "stream_id",
            extra_tables=[
                ("user_signature_stream", "stream_id"),
                ("device_lists_outbound_pokes", "stream_id"),
                ("device_lists_changes_in_room", "stream_id"),
            ],
        )

        super().__init__(database, db_conn, hs)
Example #14
    def __init__(self, database: Database, db_conn, hs):
        self._account_data_id_gen = SlavedIdTracker(
            db_conn, "account_data_max_stream_id", "stream_id")

        super(SlavedAccountDataStore, self).__init__(database, db_conn, hs)