Example #1
    async def get_joined_hosts(self, room_id: str, state_entry):
        state_group = state_entry.state_group
        if not state_group:
            # If state_group is None it means it has yet to be assigned a
            # state group, i.e. we need to make sure that calls with a state_group
            # of None don't hit previous cached calls with a None state_group.
            # To do this we set the state_group to a new object as object() != object()
            state_group = object()

        with Measure(self._clock, "get_joined_hosts"):
            return await self._get_joined_hosts(room_id,
                                                state_group,
                                                state_entry.state,
                                                state_entry=state_entry)
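
The object() sentinel trick described in the comment works because each object() instance compares equal only to itself, so a fresh sentinel key can never collide with a result cached for an earlier None state_group. A minimal standalone sketch of the pattern (hypothetical names, not Synapse code):

cache = {}

def expensive_lookup():
    return "joined hosts"

def get(state_group):
    if state_group is None:
        # Fresh sentinel: object() != object(), so this key can never
        # collide with an entry cached for another None state_group.
        state_group = object()
    if state_group not in cache:
        cache[state_group] = expensive_lookup()
    return cache[state_group]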
Example #2
    def _fetch_event_list(self, conn, event_list):
        """Handle a load of requests from the _event_fetch_list queue

        Args:
            conn (twisted.enterprise.adbapi.Connection): database connection

            event_list (list[Tuple[list[str], Deferred]]):
                The fetch requests. Each entry consists of a list of event
                ids to be fetched, and a deferred to be completed once the
                events have been fetched.

        """
        with Measure(self._clock, "_fetch_event_list"):
            try:
                event_id_lists = list(zip(*event_list))[0]
                event_ids = [
                    item for sublist in event_id_lists for item in sublist
                ]

                rows = self._new_transaction(conn, "do_fetch", [], [],
                                             self._fetch_event_rows, event_ids)

                row_dict = {r["event_id"]: r for r in rows}

                # We only want to resolve deferreds from the main thread
                def fire(lst, res):
                    for ids, d in lst:
                        if not d.called:
                            try:
                                with PreserveLoggingContext():
                                    d.callback(
                                        [res[i] for i in ids if i in res])
                            except Exception:
                                logger.exception("Failed to callback")

                with PreserveLoggingContext():
                    self.hs.get_reactor().callFromThread(
                        fire, event_list, row_dict)
            except Exception as e:
                logger.exception("do_fetch")

                # We only want to resolve deferreds from the main thread
                def fire(evs, exc):
                    for _, d in evs:
                        if not d.called:
                            with PreserveLoggingContext():
                                d.errback(exc)

                with PreserveLoggingContext():
                    self.hs.get_reactor().callFromThread(fire, event_list, e)
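
Both the success and failure paths above hop back to the reactor thread via callFromThread before touching any Deferred, since Deferreds are not thread-safe. A runnable toy version of the same hand-off (stand-in data, not Synapse code):

from twisted.internet import defer, reactor

def fetch_on_db_thread(d):
    rows = {"$event": "row"}  # stand-in for the blocking database fetch
    # Deferreds may only be fired on the reactor thread:
    reactor.callFromThread(d.callback, rows)

d = defer.Deferred()
d.addCallback(lambda rows: (print(rows), reactor.stop()))
reactor.callInThread(fetch_on_db_thread, d)
reactor.run()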
Example #3
    async def notify_interested_services_ephemeral(
        self,
        stream_key: str,
        new_token: Optional[int],
        users: Collection[UserID] = [],
    ):
        """This is called by the notifier in the background
        when a ephemeral event handled by the homeserver.

        This will determine which appservices
        are interested in the event, and submit them.

        Events will only be pushed to appservices
        that have opted into ephemeral events

        Args:
            stream_key: The stream the event came from.
            new_token: The latest stream token
            users: The user(s) involved with the event.
        """
        services = [
            service for service in self.store.get_app_services()
            if service.supports_ephemeral
        ]
        if not services or not self.notify_appservices:
            return
        logger.info("Checking interested services for %s" % (stream_key))
        with Measure(self.clock, "notify_interested_services_ephemeral"):
            for service in services:
                # Only handle typing if we have the latest token
                if stream_key == "typing_key" and new_token is not None:
                    events = await self._handle_typing(service, new_token)
                    if events:
                        self.scheduler.submit_ephemeral_events_for_as(
                            service, events)
                    # We don't persist the token for typing_key for performance reasons
                elif stream_key == "receipt_key":
                    events = await self._handle_receipts(service)
                    if events:
                        self.scheduler.submit_ephemeral_events_for_as(
                            service, events)
                    await self.store.set_type_stream_id_for_appservice(
                        service, "read_receipt", new_token)
                elif stream_key == "presence_key":
                    events = await self._handle_presence(service, users)
                    if events:
                        self.scheduler.submit_ephemeral_events_for_as(
                            service, events)
                    await self.store.set_type_stream_id_for_appservice(
                        service, "presence", new_token)
Example #4
    def resolve_events(self, state_sets, event):
        logger.info("Resolving state for %s with %d groups", event.room_id,
                    len(state_sets))
        state_set_ids = [{(ev.type, ev.state_key): ev.event_id
                          for ev in st} for st in state_sets]

        state_map = {ev.event_id: ev for st in state_sets for ev in st}

        with Measure(self.clock, "state._resolve_events"):
            new_state = resolve_events(state_set_ids, state_map)

        new_state = {key: state_map[ev_id] for key, ev_id in new_state.items()}

        return new_state
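
The id-map round-trip used here (events in, (type, state_key) -> event_id dicts for resolution, winning ids mapped back through state_map) can be seen in isolation below, with a hypothetical last-writer-wins resolver standing in for the real resolve_events:

from collections import namedtuple

Event = namedtuple("Event", ["type", "state_key", "event_id"])

state_sets = [
    [Event("m.room.name", "", "$a")],
    [Event("m.room.name", "", "$b")],
]

state_set_ids = [{(ev.type, ev.state_key): ev.event_id for ev in st}
                 for st in state_sets]
state_map = {ev.event_id: ev for st in state_sets for ev in st}

def toy_resolve(sets):
    # Stand-in for the real resolver: last writer wins per key.
    merged = {}
    for s in sets:
        merged.update(s)
    return merged

new_state = {key: state_map[ev_id]
             for key, ev_id in toy_resolve(state_set_ids).items()}
# new_state maps ("m.room.name", "") back to the full Event for "$b"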
Example #5
    @defer.inlineCallbacks
    def check_host_in_room(self, room_id, host):
        with Measure(self.clock, "check_host_in_room"):
            latest_event_ids = yield self.store.get_latest_event_ids_in_room(
                room_id)

            logger.debug(
                "calling resolve_state_groups from check_host_in_room")
            entry = yield self.state.resolve_state_groups(
                room_id, latest_event_ids)

            ret = yield self.store.is_host_joined(room_id, host,
                                                  entry.state_group,
                                                  entry.state)
            defer.returnValue(ret)
Example #6
    async def check_media_file_for_spam(
        self, file_wrapper: ReadableFileWrapper, file_info: FileInfo
    ) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]:
        """Checks if a piece of newly uploaded media should be blocked.

        This will be called for local uploads, downloads of remote media, each
        thumbnail generated for those, and web pages/images used for URL
        previews.

        Note that care should be taken to not do blocking IO operations in the
        main thread. For example, to get the contents of a file a module
        should do::

            async def check_media_file_for_spam(
                self, file: ReadableFileWrapper, file_info: FileInfo
            ) -> Union[Codes, Literal["NOT_SPAM"]]:
                buffer = BytesIO()
                await file.write_chunks_to(buffer.write)

                if buffer.getvalue() == b"Hello World":
                    return synapse.module_api.NOT_SPAM

                return Codes.FORBIDDEN


        Args:
            file: An object that allows reading the contents of the media.
            file_info: Metadata about the file.
        """

        for callback in self._check_media_file_for_spam_callbacks:
            with Measure(
                    self.clock, "{}.{}".format(callback.__module__,
                                               callback.__qualname__)):
                res = await delay_cancellation(
                    callback(file_wrapper, file_info))
                # Normalize return values to `Codes` or `"NOT_SPAM"`.
                if res is False or res is self.NOT_SPAM:
                    continue
                elif res is True:
                    return synapse.api.errors.Codes.FORBIDDEN
                elif isinstance(res, synapse.api.errors.Codes):
                    return res
                else:
                    logger.warning(
                        "Module returned invalid value, rejecting media file as spam"
                    )
                    return synapse.api.errors.Codes.FORBIDDEN

        return self.NOT_SPAM
Example #7
    @defer.inlineCallbacks
    def _handle_request(self, request, query_type):
        with Measure(self.clock, "repl_fed_query_parse"):
            content = parse_json_object_from_request(request)

            args = content["args"]

        logger.info(
            "Got %r query",
            query_type,
        )

        result = yield self.registry.on_query(query_type, args)

        defer.returnValue((200, result))
Example #8
    def _clear_queue_before_pos(self, position_to_delete):
        """Clear all the queues from before a given position"""
        with Measure(self.clock, "send_queue._clear"):
            # Delete things out of presence maps
            keys = self.presence_changed.keys()
            i = self.presence_changed.bisect_left(position_to_delete)
            for key in keys[:i]:
                del self.presence_changed[key]

            user_ids = {
                user_id for uids in self.presence_changed.values() for user_id in uids
            }

            keys = self.presence_destinations.keys()
            i = self.presence_destinations.bisect_left(position_to_delete)
            for key in keys[:i]:
                del self.presence_destinations[key]

            user_ids.update(
                user_id for user_id, _ in self.presence_destinations.values()
            )

            to_del = [
                user_id for user_id in self.presence_map if user_id not in user_ids
            ]
            for user_id in to_del:
                del self.presence_map[user_id]

            # Delete things out of keyed edus
            keys = self.keyed_edu_changed.keys()
            i = self.keyed_edu_changed.bisect_left(position_to_delete)
            for key in keys[:i]:
                del self.keyed_edu_changed[key]

            live_keys = set()
            for edu_key in self.keyed_edu_changed.values():
                live_keys.add(edu_key)

            keys_to_del = [
                edu_key for edu_key in self.keyed_edu if edu_key not in live_keys
            ]
            for edu_key in keys_to_del:
                del self.keyed_edu[edu_key]

            # Delete things out of edu map
            keys = self.edus.keys()
            i = self.edus.bisect_left(position_to_delete)
            for key in keys[:i]:
                del self.edus[key]
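
Every map pruned above is keyed by stream position in sorted order, which is what makes bisect_left a cheap way to find the cut-off. A self-contained sketch of the same pruning using sortedcontainers (hypothetical data):

from sortedcontainers import SortedDict

queue = SortedDict({1: "a", 3: "b", 7: "c"})  # stream position -> payload

def clear_before(position_to_delete):
    keys = queue.keys()
    i = queue.bisect_left(position_to_delete)
    # Slicing the sorted view materialises a plain list, so deleting
    # from the dict while iterating over the slice is safe.
    for key in keys[:i]:
        del queue[key]

clear_before(5)  # removes positions 1 and 3, leaving {7: "c"}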
Example #9
    def get_new_events(self, from_key, room_ids, **kwargs):
        with Measure(self.clock, "typing.get_new_events"):
            from_key = int(from_key)
            handler = self.get_typing_handler()

            events = []
            for room_id in room_ids:
                if room_id not in handler._room_serials:
                    continue
                if handler._room_serials[room_id] <= from_key:
                    continue

                events.append(self._make_event_for(room_id))

            return events, handler._latest_room_serial
Example #10
    @defer.inlineCallbacks
    def get_joined_users_from_state(self, room_id, state_entry):
        state_group = state_entry.state_group
        if not state_group:
            # If state_group is None it means it has yet to be assigned a
            # state group, i.e. we need to make sure that calls with a state_group
            # of None don't hit previous cached calls with a None state_group.
            # To do this we set the state_group to a new object as object() != object()
            state_group = object()

        with Measure(self._clock, "get_joined_users_from_state"):
            return (yield self._get_joined_users_from_context(
                room_id, state_group, state_entry.state,
                context=state_entry))
Example #11
    async def resolve_events_with_store(
        self,
        room_id: str,
        room_version: str,
        state_sets: Sequence[StateMap[str]],
        event_map: Optional[Dict[str, EventBase]],
        state_res_store: "StateResolutionStore",
    ) -> StateMap[str]:
        """
        Args:
            room_id: the room we are working in

            room_version: Version of the room

            state_sets: List of dicts of (type, state_key) -> event_id,
                which are the different state groups to resolve.

            event_map:
                a dict from event_id to event, for any events that we happen to
                have in flight (eg, those currently being persisted). This will be
                used as a starting point for finding the state we need; any missing
                events will be requested via state_map_factory.

                If None, all events will be fetched via state_res_store.

            state_res_store: a place to fetch events from

        Returns:
            a map from (type, state_key) to event_id.
        """
        try:
            with Measure(self.clock, "state._resolve_events") as m:
                v = KNOWN_ROOM_VERSIONS[room_version]
                if v.state_res == StateResolutionVersions.V1:
                    return await v1.resolve_events_with_store(
                        room_id, state_sets, event_map,
                        state_res_store.get_events)
                else:
                    return await v2.resolve_events_with_store(
                        self.clock,
                        room_id,
                        room_version,
                        state_sets,
                        event_map,
                        state_res_store,
                    )
        finally:
            self._record_state_res_metrics(room_id, m.get_resource_usage())
Example #12
    async def _unsafe_process(self) -> None:
        # If self.pos is None, it means we haven't fetched it from the DB yet
        if self.pos is None:
            self.pos = await self.store.get_user_directory_stream_pos()

            # If still None then the initial background update hasn't happened yet.
            if self.pos is None:
                return None

            room_max_stream_ordering = self.store.get_room_max_stream_ordering()
            if self.pos > room_max_stream_ordering:
                # apparently, we've processed more events than exist in the database!
                # this can happen if events are removed with history purge or similar.
                logger.warning(
                    "Event stream ordering appears to have gone backwards (%i -> %i): "
                    "rewinding user directory processor",
                    self.pos,
                    room_max_stream_ordering,
                )
                self.pos = room_max_stream_ordering

        # Loop round handling deltas until we're up to date
        while True:
            with Measure(self.clock, "user_dir_delta"):
                room_max_stream_ordering = self.store.get_room_max_stream_ordering()
                if self.pos == room_max_stream_ordering:
                    return

                logger.debug("Processing user stats %s->%s", self.pos,
                             room_max_stream_ordering)
                (
                    max_pos,
                    deltas,
                ) = await self._storage_controllers.state.get_current_state_deltas(
                    self.pos, room_max_stream_ordering)

                logger.debug("Handling %d state deltas", len(deltas))
                await self._handle_deltas(deltas)

                self.pos = max_pos

                # Expose current event processing position to prometheus
                synapse.metrics.event_processing_positions.labels(
                    "user_dir").set(max_pos)

                await self.store.update_user_directory_stream_pos(max_pos)
Example #13
    def check(self, room_version, event, auth_events, do_sig_check=True):
        """ Checks if this event is correctly authed.

        Args:
            room_version (str): version of the room
            event: the event being checked.
            auth_events (dict: event-key -> event): the existing room state.


        Returns:
            True if the auth checks pass.
        """
        with Measure(self.clock, "auth.check"):
            event_auth.check(
                room_version, event, auth_events, do_sig_check=do_sig_check
            )
Example #14
    async def get_new_events(self, from_key: int, room_ids: Iterable[str],
                             **kwargs) -> Tuple[List[JsonDict], int]:
        with Measure(self.clock, "typing.get_new_events"):
            from_key = int(from_key)
            handler = self.get_typing_handler()

            events = []
            for room_id in room_ids:
                if room_id not in handler._room_serials:
                    continue
                if handler._room_serials[room_id] <= from_key:
                    continue

                events.append(self._make_event_for(room_id))

            return (events, handler._latest_room_serial)
Example #15
    @defer.inlineCallbacks
    def _handle_request(self, request, edu_type):
        with Measure(self.clock, "repl_fed_send_edu_parse"):
            content = parse_json_object_from_request(request)

            origin = content["origin"]
            edu_content = content["content"]

        logger.info(
            "Got %r edu from %s",
            edu_type,
            origin,
        )

        result = yield self.registry.on_edu(edu_type, origin, edu_content)

        defer.returnValue((200, result))
Example #16
    @defer.inlineCallbacks
    def _unsafe_process(self):
        # Loop round handling deltas until we're up to date
        while True:
            with Measure(self.clock, "presence_delta"):
                deltas = yield self.store.get_current_state_deltas(
                    self._event_pos)
                if not deltas:
                    return

                yield self._handle_state_delta(deltas)

                self._event_pos = deltas[-1]["stream_id"]

                # Expose current event processing position to prometheus
                synapse.metrics.event_processing_positions.labels(
                    "presence").set(self._event_pos)
Example #17
    async def _notify_interested_services_ephemeral(
        self,
        services: List[ApplicationService],
        stream_key: str,
        new_token: int,
        users: Collection[Union[str, UserID]],
    ) -> None:
        logger.debug("Checking interested services for %s", stream_key)
        with Measure(self.clock, "notify_interested_services_ephemeral"):
            for service in services:
                if stream_key == "typing_key":
                    # Note that we don't persist the token (via set_type_stream_id_for_appservice)
                    # for typing_key due to performance reasons and due to their highly
                    # ephemeral nature.
                    #
                    # Instead we simply grab the latest typing updates in _handle_typing
                    # and, if they apply to this application service, send it off.
                    events = await self._handle_typing(service, new_token)
                    if events:
                        self.scheduler.submit_ephemeral_events_for_as(
                            service, events)
                    continue

                # Since we read/update the stream position for this AS/stream
                with (await self._ephemeral_events_linearizer.queue(
                    (service.id, stream_key))):
                    if stream_key == "receipt_key":
                        events = await self._handle_receipts(
                            service, new_token)
                        if events:
                            self.scheduler.submit_ephemeral_events_for_as(
                                service, events)

                        # Persist the latest handled stream token for this appservice
                        await self.store.set_type_stream_id_for_appservice(
                            service, "read_receipt", new_token)

                    elif stream_key == "presence_key":
                        events = await self._handle_presence(
                            service, users, new_token)
                        if events:
                            self.scheduler.submit_ephemeral_events_for_as(
                                service, events)

                        # Persist the latest handled stream token for this appservice
                        await self.store.set_type_stream_id_for_appservice(
                            service, "presence", new_token)
Example #18
    def _resolve_events(self, state_sets, event_type=None, state_key=""):
        """
        Returns
            (dict[(str, str), synapse.events.FrozenEvent], list[str]): a tuple
            (new_state, prev_states). new_state is a map from (type, state_key)
            to event. prev_states is a list of event_ids.
        """
        with Measure(self.clock, "state._resolve_events"):
            state = {}
            for st in state_sets:
                for e in st:
                    state.setdefault((e.type, e.state_key), {})[e.event_id] = e

            unconflicted_state = {
                k: next(iter(v.values()))
                for k, v in state.items() if len(v) == 1
            }

            conflicted_state = {
                k: list(v.values())
                for k, v in state.items() if len(v) > 1
            }

            if event_type:
                prev_states_events = conflicted_state.get(
                    (event_type, state_key), [])
                prev_states = [s.event_id for s in prev_states_events]
            else:
                prev_states = []

            auth_events = {
                k: e
                for k, e in unconflicted_state.items()
                if k[0] in AuthEventTypes
            }

            try:
                resolved_state = self._resolve_state_events(
                    conflicted_state, auth_events)
            except Exception:
                logger.exception("Failed to resolve state")
                raise

            new_state = unconflicted_state
            new_state.update(resolved_state)

        return new_state, prev_states
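
The unconflicted/conflicted split above is the heart of this (v1-style) resolution: keys with a single candidate event pass straight through, and only keys with competing candidates are sent to _resolve_state_events. A toy illustration with strings standing in for events:

state = {
    ("m.room.name", ""): {"$a": "ev_a", "$b": "ev_b"},  # two candidates
    ("m.room.topic", ""): {"$c": "ev_c"},               # one candidate
}

unconflicted = {k: next(iter(v.values()))
                for k, v in state.items() if len(v) == 1}
conflicted = {k: list(v.values())
              for k, v in state.items() if len(v) > 1}

# unconflicted == {("m.room.topic", ""): "ev_c"}
# conflicted   == {("m.room.name", ""): ["ev_a", "ev_b"]}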
Example #19
    @defer.inlineCallbacks
    def unread_notifs_for_room_id(self, room_id, sync_config):
        with Measure(self.clock, "unread_notifs_for_room_id"):
            last_unread_event_id = yield self.store.get_last_receipt_event_id_for_user(
                user_id=sync_config.user.to_string(),
                room_id=room_id,
                receipt_type="m.read")

            notifs = []
            if last_unread_event_id:
                notifs = yield self.store.get_unread_event_push_actions_by_room_for_user(
                    room_id, sync_config.user.to_string(),
                    last_unread_event_id)
                defer.returnValue(notifs)

            # There is no new information in this period, so your notification
            # count is whatever it was last time.
            defer.returnValue(None)
Example #20
    def _handle_timeouts(self):
        """Checks the presence of users that have timed out and updates as
        appropriate.
        """
        logger.info("Handling presence timeouts")
        now = self.clock.time_msec()

        try:
            with Measure(self.clock, "presence_handle_timeouts"):
                # Fetch the list of users that *may* have timed out. Things may have
                # changed since the timeout was set, so we won't necessarily have to
                # take any action.
                users_to_check = set(self.wheel_timer.fetch(now))

                # Check whether the lists of syncing processes from an external
                # process have expired.
                expired_process_ids = [
                    process_id for process_id, last_update
                    in self.external_process_last_updated_ms.items()
                    if now - last_update > EXTERNAL_PROCESS_EXPIRY
                ]
                for process_id in expired_process_ids:
                    # Collect the users that were syncing via this process,
                    # then drop the process's bookkeeping entries.
                    users_to_check.update(
                        self.external_process_to_current_syncs.pop(process_id, ())
                    )
                    self.external_process_last_updated_ms.pop(process_id)

                states = [
                    self.user_to_current_state.get(
                        user_id, UserPresenceState.default(user_id)
                    )
                    for user_id in users_to_check
                ]

                timers_fired_counter.inc(len(states))

                changes = handle_timeouts(
                    states,
                    is_mine_fn=self.is_mine_id,
                    syncing_user_ids=self.get_currently_syncing_users(),
                    now=now,
                )

            run_in_background(self._update_states_and_catch_exception, changes)
        except Exception:
            logger.exception("Exception in _handle_timeouts loop")
Example #21
        async def do_iterations():
            try:
                with Measure(self.clock, "get_server_verify_keys"):
                    for f in self._key_fetchers:
                        if not remaining_requests:
                            return
                        await self._attempt_key_fetches_with_fetcher(
                            f, remaining_requests
                        )

                    # look for any requests which weren't satisfied
                    while remaining_requests:
                        verify_request = remaining_requests.pop()
                        rq_str = (
                            "VerifyJsonRequest(server=%s, key_ids=%s, min_valid=%i)"
                            % (
                                verify_request.server_name,
                                verify_request.key_ids,
                                verify_request.minimum_valid_until_ts,
                            )
                        )

                        # If we run the errback immediately, it may cancel our
                        # loggingcontext while we are still in it, so instead we
                        # schedule it for the next time round the reactor.
                        #
                        # (this also ensures that we don't get a stack overflow if we
                        # have a massive queue of lookups waiting for this server).
                        self.clock.call_later(
                            0,
                            verify_request.key_ready.errback,
                            SynapseError(
                                401,
                                "Failed to find any key to satisfy %s" % (rq_str,),
                                Codes.UNAUTHORIZED,
                            ),
                        )
            except Exception as err:
                # we don't really expect to get here, because any errors should already
                # have been caught and logged. But if we do, let's log the error and make
                # sure that all of the deferreds are resolved.
                logger.error("Unexpected error in _get_server_verify_keys: %s", err)
                with PreserveLoggingContext():
                    for verify_request in remaining_requests:
                        if not verify_request.key_ready.called:
                            verify_request.key_ready.errback(err)
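
The call_later(0, ...) above is the standard Twisted trampoline: it fires the errback on the next reactor tick instead of inline, which protects the current logging context and bounds recursion when many lookups are queued. A runnable toy (not Synapse code):

from twisted.internet import defer, reactor

d = defer.Deferred()
d.addErrback(lambda f: print("failed:", f.value))

# Firing d.errback(...) inline could re-enter a long callback chain;
# deferring to the next reactor tick keeps the stack shallow.
reactor.callLater(0, d.errback, RuntimeError("no key satisfied the request"))
reactor.callLater(0.1, reactor.stop)
reactor.run()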
Example #22
    def _fetch_event_list(self, conn, event_list):
        """Handle a load of requests from the _event_fetch_list queue

        Args:
            conn (twisted.enterprise.adbapi.Connection): database connection

            event_list (list[Tuple[list[str], Deferred]]):
                The fetch requests. Each entry consists of a list of event
                ids to be fetched, and a deferred to be completed once the
                events have been fetched.

                The deferreds are callbacked with a dictionary mapping from event id
                to event row. Note that it may well contain additional events that
                were not part of this request.
        """
        with Measure(self._clock, "_fetch_event_list"):
            try:
                events_to_fetch = {
                    event_id
                    for events, _ in event_list for event_id in events
                }

                row_dict = self.db.new_transaction(conn, "do_fetch", [], [],
                                                   self._fetch_event_rows,
                                                   events_to_fetch)

                # We only want to resolve deferreds from the main thread
                def fire():
                    for _, d in event_list:
                        d.callback(row_dict)

                with PreserveLoggingContext():
                    self.hs.get_reactor().callFromThread(fire)
            except Exception as e:
                logger.exception("do_fetch")

                # We only want to resolve deferreds from the main thread
                def fire(evs, exc):
                    for _, d in evs:
                        if not d.called:
                            with PreserveLoggingContext():
                                d.errback(exc)

                with PreserveLoggingContext():
                    self.hs.get_reactor().callFromThread(fire, event_list, e)
Example #23
    @defer.inlineCallbacks
    def _send_request(self, service):
        if service.id in self.requests_in_flight:
            return

        self.requests_in_flight.add(service.id)
        try:
            while True:
                events = self.queued_events.pop(service.id, [])
                if not events:
                    return

                with Measure(self.clock, "servicequeuer.send"):
                    try:
                        yield self.txn_ctrl.send(service, events)
                    except Exception:
                        logger.exception("AS request failed")
        finally:
            self.requests_in_flight.discard(service.id)
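
The requests_in_flight guard gives each appservice exactly one sender loop: a second caller sees the id already present and bails, while the existing loop keeps draining whatever accumulates in queued_events. The same shape stripped to its essentials (hypothetical names, asyncio instead of inlineCallbacks):

requests_in_flight = set()
queued_events = {}

async def send_loop(service_id, send):
    if service_id in requests_in_flight:
        return  # another loop is already draining this service's queue
    requests_in_flight.add(service_id)
    try:
        while True:
            events = queued_events.pop(service_id, [])
            if not events:
                return
            await send(service_id, events)
    finally:
        requests_in_flight.discard(service_id)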
Example #24
    @defer.inlineCallbacks
    def _get_event_from_row(self,
                            internal_metadata,
                            js,
                            redactions,
                            format_version,
                            rejected_reason=None):
        """Parse an event row which has been read from the database

        Args:
            internal_metadata (str): json-encoded internal_metadata column
            js (str): json-encoded event body from event_json
            redactions (list[str]): a list of the events which claim to have redacted
                this event, from the redactions table
            format_version (str): the 'format_version' column
            rejected_reason (str|None): the reason this event was rejected, if any

        Returns:
            _EventCacheEntry
        """
        with Measure(self._clock, "_get_event_from_row"):
            d = json.loads(js)
            internal_metadata = json.loads(internal_metadata)

            if format_version is None:
                # This means that we stored the event before we had the concept
                # of an event format version, so it must be a V1 event.
                format_version = EventFormatVersions.V1

            original_ev = event_type_from_format_version(format_version)(
                event_dict=d,
                internal_metadata_dict=internal_metadata,
                rejected_reason=rejected_reason,
            )

            redacted_event = yield self._maybe_redact_event_row(
                original_ev, redactions)

            cache_entry = _EventCacheEntry(event=original_ev,
                                           redacted_event=redacted_event)

            self._get_event_cache.prefill((original_ev.event_id, ),
                                          cache_entry)

        defer.returnValue(cache_entry)
Example #25
    @defer.inlineCallbacks
    def _process(self):
        if self.processing:
            return

        with LoggingContext("push._process"):
            with Measure(self.clock, "push._process"):
                try:
                    self.processing = True
                    # if the max ordering changes while we're running _unsafe_process,
                    # call it again, and so on until we've caught up.
                    while True:
                        starting_max_ordering = self.max_stream_ordering
                        try:
                            yield self._unsafe_process()
                        except Exception:
                            logger.exception("Exception processing notifs")
                        if self.max_stream_ordering == starting_max_ordering:
                            break
                finally:
                    self.processing = False
Example #26
    async def _handle_request(self, request, event_id):
        with Measure(self.clock, "repl_send_event_parse"):
            content = parse_json_object_from_request(request)

            event_dict = content["event"]
            room_ver = KNOWN_ROOM_VERSIONS[content["room_version"]]
            internal_metadata = content["internal_metadata"]
            rejected_reason = content["rejected_reason"]

            event = make_event_from_dict(event_dict, room_ver,
                                         internal_metadata, rejected_reason)

            requester = Requester.deserialize(self.store, content["requester"])
            context = EventContext.deserialize(self.storage,
                                               content["context"])

            ratelimit = content["ratelimit"]
            extra_users = [
                UserID.from_string(u) for u in content["extra_users"]
            ]

        if requester.user:
            request.authenticated_entity = requester.user.to_string()

        logger.info("Got event to send with ID: %s into room: %s",
                    event.event_id, event.room_id)

        event = await self.event_creation_handler.persist_and_notify_client_event(
            requester,
            event,
            context,
            ratelimit=ratelimit,
            extra_users=extra_users)

        return (
            200,
            {
                "stream_id": event.internal_metadata.stream_ordering,
                "event_id": event.event_id,
            },
        )
Example #27
    @defer.inlineCallbacks
    def resolve_events(self, room_version, state_sets, event):
        logger.info("Resolving state for %s with %d groups", event.room_id,
                    len(state_sets))
        state_set_ids = [{(ev.type, ev.state_key): ev.event_id
                          for ev in st} for st in state_sets]

        state_map = {ev.event_id: ev for st in state_sets for ev in st}

        with Measure(self.clock, "state._resolve_events"):
            new_state = yield resolve_events_with_factory(
                room_version,
                state_set_ids,
                event_map=state_map,
                state_map_factory=self._state_map_factory)

        new_state = {
            key: state_map[ev_id]
            for key, ev_id in new_state.items()
        }

        defer.returnValue(new_state)
Example #28
    @defer.inlineCallbacks
    def resolve_events(self, room_version, state_sets, event):
        logger.info("Resolving state for %s with %d groups", event.room_id,
                    len(state_sets))
        state_set_ids = [{(ev.type, ev.state_key): ev.event_id
                          for ev in st} for st in state_sets]

        state_map = {ev.event_id: ev for st in state_sets for ev in st}

        with Measure(self.clock, "state._resolve_events"):
            new_state = yield resolve_events_with_store(
                self.clock,
                event.room_id,
                room_version,
                state_set_ids,
                event_map=state_map,
                state_res_store=StateResolutionStore(self.store),
            )

        new_state = {key: state_map[ev_id] for key, ev_id in new_state.items()}

        return new_state
Example #29
    def on_new_event(
        self,
        stream_key: str,
        new_token: Union[int, RoomStreamToken],
        users: Collection[Union[str, UserID]] = [],
        rooms: Collection[str] = [],
    ):
        """ Used to inform listeners that something has happened event wise.

        Will wake up all listeners for the given users and rooms.
        """
        with PreserveLoggingContext():
            with Measure(self.clock, "on_new_event"):
                user_streams = set()

                for user in users:
                    user_stream = self.user_to_user_stream.get(str(user))
                    if user_stream is not None:
                        user_streams.add(user_stream)

                for room in rooms:
                    user_streams |= self.room_to_user_streams.get(room, set())

                time_now_ms = self.clock.time_msec()
                for user_stream in user_streams:
                    try:
                        user_stream.notify(stream_key, new_token, time_now_ms)
                    except Exception:
                        logger.exception("Failed to notify listener")

                self.notify_replication()

                # Notify appservices
                run_as_background_process(
                    "_notify_app_services_ephemeral",
                    self._notify_app_services_ephemeral,
                    stream_key,
                    new_token,
                    users,
                )
Example #30
    @defer.inlineCallbacks
    def _unsafe_process(self):
        # If self.pos is None, it means we haven't fetched it from the DB yet
        if self.pos is None:
            self.pos = yield self.store.get_user_directory_stream_pos()

        # If still None then we need to do the initial fill of the directory
        if self.pos is None:
            yield self._do_initial_spam()
            self.pos = yield self.store.get_user_directory_stream_pos()

        # Loop round handling deltas until we're up to date
        while True:
            with Measure(self.clock, "user_dir_delta"):
                deltas = yield self.store.get_current_state_deltas(self.pos)
                if not deltas:
                    return

                logger.info("Handling %d state deltas", len(deltas))
                yield self._handle_deltas(deltas)

                self.pos = deltas[-1]["stream_id"]
                yield self.store.update_user_directory_stream_pos(self.pos)
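
Several of the processors above (Examples #12, #16 and #30) share the same catch-up shape: read a persisted stream position, pull deltas until none remain, and persist the new position after each batch so a restart resumes where it left off. Distilled into a sketch (hypothetical store methods modelled on the ones used above):

async def catch_up(store, handle_deltas):
    pos = await store.get_stream_pos()
    while True:
        deltas = await store.get_current_state_deltas(pos)
        if not deltas:
            return  # up to date
        await handle_deltas(deltas)
        pos = deltas[-1]["stream_id"]
        # Persist per batch: a crash re-processes at most one batch.
        await store.update_stream_pos(pos)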