Example #1
class RoomListHandler(BaseHandler):
    def __init__(self, hs):
        super(RoomListHandler, self).__init__(hs)
        self.response_cache = ResponseCache(hs)
        self.remote_response_cache = ResponseCache(hs, timeout_ms=30 * 1000)

    def get_local_public_room_list(
        self,
        limit=None,
        since_token=None,
        search_filter=None,
        network_tuple=EMPTY_THIRD_PARTY_ID,
    ):
        """Generate a local public room list.

        There are multiple different lists: the main one plus one per third
        party network. A client can ask for a specific list or to return all.

        Args:
            limit (int)
            since_token (str)
            search_filter (dict)
            network_tuple (ThirdPartyInstanceID): Which public list to use.
                This can be (None, None) to indicate the main list, or a particular
                appservice and network id to use an appservice specific one.
                Setting to None returns all public rooms across all lists.
        """
        logger.info(
            "Getting public room list: limit=%r, since=%r, search=%r, network=%r",
            limit,
            since_token,
            bool(search_filter),
            network_tuple,
        )
        if search_filter:
            # We explicitly don't bother caching searches or requests for
            # appservice specific lists.
            return self._get_public_room_list(
                limit,
                since_token,
                search_filter,
                network_tuple=network_tuple,
            )

        key = (limit, since_token, network_tuple)
        result = self.response_cache.get(key)
        if not result:
            result = self.response_cache.set(
                key,
                self._get_public_room_list(limit,
                                           since_token,
                                           network_tuple=network_tuple))
        return result

    @defer.inlineCallbacks
    def _get_public_room_list(
        self,
        limit=None,
        since_token=None,
        search_filter=None,
        network_tuple=EMPTY_THIRD_PARTY_ID,
    ):
        if since_token and since_token != "END":
            since_token = RoomListNextBatch.from_token(since_token)
        else:
            since_token = None

        rooms_to_order_value = {}
        rooms_to_num_joined = {}

        newly_visible = []
        newly_unpublished = []
        if since_token:
            stream_token = since_token.stream_ordering
            current_public_id = yield self.store.get_current_public_room_stream_id()
            public_room_stream_id = since_token.public_room_stream_id
            newly_visible, newly_unpublished = yield self.store.get_public_room_changes(
                public_room_stream_id,
                current_public_id,
                network_tuple=network_tuple,
            )
        else:
            stream_token = yield self.store.get_room_max_stream_ordering()
            public_room_stream_id = yield self.store.get_current_public_room_stream_id()

        room_ids = yield self.store.get_public_room_ids_at_stream_id(
            public_room_stream_id,
            network_tuple=network_tuple,
        )

        # We want to return rooms in a particular order: the number of joined
        # users. We then arbitrarily use the room_id as a tie breaker.

        @defer.inlineCallbacks
        def get_order_for_room(room_id):
            # Most of the rooms won't have changed between the since token and
            # now (especially if the since token is "now"). So, we can ask what
            # the current users are in a room (that will hit a cache) and then
            # check if the room has changed since the since token. (We have to
            # do it in that order to avoid races).
            # If things have changed then fall back to getting the current state
            # at the since token.
            joined_users = yield self.store.get_users_in_room(room_id)
            if self.store.has_room_changed_since(room_id, stream_token):
                latest_event_ids = yield self.store.get_forward_extremeties_for_room(
                    room_id, stream_token)

                if not latest_event_ids:
                    return

                joined_users = yield self.state_handler.get_current_user_in_room(
                    room_id,
                    latest_event_ids,
                )

            num_joined_users = len(joined_users)
            rooms_to_num_joined[room_id] = num_joined_users

            if num_joined_users == 0:
                return

            # We want larger rooms to be first, hence negating num_joined_users
            rooms_to_order_value[room_id] = (-num_joined_users, room_id)

        yield concurrently_execute(get_order_for_room, room_ids, 10)

        sorted_entries = sorted(rooms_to_order_value.items(),
                                key=lambda e: e[1])
        sorted_rooms = [room_id for room_id, _ in sorted_entries]

        # `sorted_rooms` should now be a list of all public room ids that is
        # stable across pagination. Therefore, we can use indices into this
        # list as our pagination tokens.
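        # For example (illustrative room ids): with sorted_rooms ==
        # ["!a", "!b", "!c", "!d"] and limit == 2, the first page returns
        # "!a" and "!b" and records index 1 in its next_batch token, so the
        # next request starts scanning at "!c".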

        # Filter out rooms that we don't want to return
        rooms_to_scan = [
            r for r in sorted_rooms
            if r not in newly_unpublished and rooms_to_num_joined[r] > 0
        ]

        total_room_count = len(rooms_to_scan)

        if since_token:
            # Filter out rooms we've already returned previously:
            # `since_token.current_limit` is the index of the last room we
            # sent down. Paginating forwards we drop it and everything before
            # it; paginating backwards, it and everything after it.
            if since_token.direction_is_forward:
                rooms_to_scan = rooms_to_scan[since_token.current_limit + 1:]
            else:
                rooms_to_scan = rooms_to_scan[:since_token.current_limit]
                rooms_to_scan.reverse()

        # Actually generate the entries. _append_room_entry_to_chunk will
        # append to chunk but stops appending once len(chunk) exceeds
        # limit + 1.
        chunk = []
        if limit and not search_filter:
            step = limit + 1
            for i in xrange(0, len(rooms_to_scan), step):
                # We iterate here because in the vast majority of cases we'll
                # stop at the first iteration, but occasionally
                # _append_room_entry_to_chunk won't append to the chunk and so
                # we need to loop again.
                # We don't want to scan over the entire range either as that
                # would potentially waste a lot of work.
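                # For example, with limit == 10 we scan 11 rooms per batch;
                # if every one matches, the first batch already yields the
                # limit + 1 entries needed to prove there is another page.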
                yield concurrently_execute(
                    lambda r: self._append_room_entry_to_chunk(
                        r, rooms_to_num_joined[r], chunk, limit, search_filter
                    ), rooms_to_scan[i:i + step], 10)
                if len(chunk) >= limit + 1:
                    break
        else:
            yield concurrently_execute(
                lambda r: self._append_room_entry_to_chunk(
                    r, rooms_to_num_joined[r], chunk, limit, search_filter),
                rooms_to_scan, 5)

        chunk.sort(key=lambda e: (-e["num_joined_members"], e["room_id"]))

        # Work out the new limit of the batch for pagination, or None if we
        # know there are no more results that would be returned.
        # i.e., [since_token.current_limit..new_limit] is the batch of rooms
        # we've returned (or the reverse if we paginated backwards)
        # We tried to pull out limit + 1 rooms above, so if we have <= limit
        # then we know there are no more results to return
        new_limit = None
        if chunk and (not limit or len(chunk) > limit):

            if not since_token or since_token.direction_is_forward:
                if limit:
                    chunk = chunk[:limit]
                last_room_id = chunk[-1]["room_id"]
            else:
                if limit:
                    chunk = chunk[-limit:]
                last_room_id = chunk[0]["room_id"]

            new_limit = sorted_rooms.index(last_room_id)

        results = {
            "chunk": chunk,
            "total_room_count_estimate": total_room_count,
        }

        if since_token:
            results["new_rooms"] = bool(newly_visible)

        if not since_token or since_token.direction_is_forward:
            if new_limit is not None:
                results["next_batch"] = RoomListNextBatch(
                    stream_ordering=stream_token,
                    public_room_stream_id=public_room_stream_id,
                    current_limit=new_limit,
                    direction_is_forward=True,
                ).to_token()

            if since_token:
                results["prev_batch"] = since_token.copy_and_replace(
                    direction_is_forward=False,
                    current_limit=since_token.current_limit + 1,
                ).to_token()
        else:
            if new_limit is not None:
                results["prev_batch"] = RoomListNextBatch(
                    stream_ordering=stream_token,
                    public_room_stream_id=public_room_stream_id,
                    current_limit=new_limit,
                    direction_is_forward=False,
                ).to_token()

            if since_token:
                results["next_batch"] = since_token.copy_and_replace(
                    direction_is_forward=True,
                    current_limit=since_token.current_limit - 1,
                ).to_token()

        defer.returnValue(results)

    @defer.inlineCallbacks
    def _append_room_entry_to_chunk(self, room_id, num_joined_users, chunk,
                                    limit, search_filter):
        """Generate the entry for a room in the public room list and append it
        to the `chunk` if it matches the search filter
        """
        if limit and len(chunk) > limit + 1:
            # We've already got enough, so let's just drop it.
            return

        result = yield self.generate_room_entry(room_id, num_joined_users)

        if result and _matches_room_entry(result, search_filter):
            chunk.append(result)

    @cachedInlineCallbacks(num_args=1, cache_context=True)
    def generate_room_entry(self,
                            room_id,
                            num_joined_users,
                            cache_context,
                            with_alias=True,
                            allow_private=False):
        """Returns the entry for a room
        """
        result = {
            "room_id": room_id,
            "num_joined_members": num_joined_users,
        }

        current_state_ids = yield self.store.get_current_state_ids(
            room_id,
            on_invalidate=cache_context.invalidate,
        )

        event_map = yield self.store.get_events([
            event_id for key, event_id in current_state_ids.iteritems()
            if key[0] in (
                EventTypes.JoinRules,
                EventTypes.Name,
                EventTypes.Topic,
                EventTypes.CanonicalAlias,
                EventTypes.RoomHistoryVisibility,
                EventTypes.GuestAccess,
                "m.room.avatar",
            )
        ])

        current_state = {(ev.type, ev.state_key): ev
                         for ev in event_map.values()}

        # Double check that this is actually a public room.
        join_rules_event = current_state.get((EventTypes.JoinRules, ""))
        if join_rules_event:
            join_rule = join_rules_event.content.get("join_rule", None)
            if not allow_private and join_rule and join_rule != JoinRules.PUBLIC:
                defer.returnValue(None)

        if with_alias:
            aliases = yield self.store.get_aliases_for_room(
                room_id, on_invalidate=cache_context.invalidate)
            if aliases:
                result["aliases"] = aliases

        name_event = current_state.get((EventTypes.Name, ""))
        if name_event:
            name = name_event.content.get("name", None)
            if name:
                result["name"] = name

        topic_event = current_state.get((EventTypes.Topic, ""))
        if topic_event:
            topic = topic_event.content.get("topic", None)
            if topic:
                result["topic"] = topic

        canonical_event = current_state.get((EventTypes.CanonicalAlias, ""))
        if canonical_event:
            canonical_alias = canonical_event.content.get("alias", None)
            if canonical_alias:
                result["canonical_alias"] = canonical_alias

        visibility_event = current_state.get(
            (EventTypes.RoomHistoryVisibility, ""))
        visibility = None
        if visibility_event:
            visibility = visibility_event.content.get("history_visibility",
                                                      None)
        result["world_readable"] = visibility == "world_readable"

        guest_event = current_state.get((EventTypes.GuestAccess, ""))
        guest = None
        if guest_event:
            guest = guest_event.content.get("guest_access", None)
        result["guest_can_join"] = guest == "can_join"

        avatar_event = current_state.get(("m.room.avatar", ""))
        if avatar_event:
            avatar_url = avatar_event.content.get("url", None)
            if avatar_url:
                result["avatar_url"] = avatar_url

        defer.returnValue(result)

    @defer.inlineCallbacks
    def get_remote_public_room_list(
        self,
        server_name,
        limit=None,
        since_token=None,
        search_filter=None,
        include_all_networks=False,
        third_party_instance_id=None,
    ):
        if search_filter:
            # We currently don't support searching across federation, so we have
            # to do it manually without pagination
            limit = None
            since_token = None

        res = yield self._get_remote_list_cached(
            server_name,
            limit=limit,
            since_token=since_token,
            include_all_networks=include_all_networks,
            third_party_instance_id=third_party_instance_id,
        )

        if search_filter:
            res = {
                "chunk": [
                    entry for entry in list(res.get("chunk", []))
                    if _matches_room_entry(entry, search_filter)
                ]
            }

        defer.returnValue(res)

    def _get_remote_list_cached(
        self,
        server_name,
        limit=None,
        since_token=None,
        search_filter=None,
        include_all_networks=False,
        third_party_instance_id=None,
    ):
        repl_layer = self.hs.get_replication_layer()
        if search_filter:
            # We can't cache when asking for search
            return repl_layer.get_public_rooms(
                server_name,
                limit=limit,
                since_token=since_token,
                search_filter=search_filter,
                include_all_networks=include_all_networks,
                third_party_instance_id=third_party_instance_id,
            )

        key = (
            server_name,
            limit,
            since_token,
            include_all_networks,
            third_party_instance_id,
        )
        result = self.remote_response_cache.get(key)
        if not result:
            result = self.remote_response_cache.set(
                key,
                repl_layer.get_public_rooms(
                    server_name,
                    limit=limit,
                    since_token=since_token,
                    search_filter=search_filter,
                    include_all_networks=include_all_networks,
                    third_party_instance_id=third_party_instance_id,
                ))
        return result
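
Both response_cache call sites above follow the same shape: try get(key) first, and only on a miss start the real work and set(key, ...) so that concurrent identical requests share a single computation. Below is a minimal sketch of that pattern; it is hypothetical, not Synapse's actual ResponseCache (which wraps results in an ObservableDeferred and supports per-entry timeouts).

from twisted.internet import defer
from twisted.python import failure


class SimpleResponseCache(object):
    def __init__(self):
        self.pending = {}  # key -> list of Deferreds waiting on the result

    def get(self, key):
        """Return a Deferred for an in-flight request, or None on a miss."""
        waiters = self.pending.get(key)
        if waiters is None:
            return None  # caller should compute the result and call set()
        d = defer.Deferred()
        waiters.append(d)
        return d

    def set(self, key, deferred):
        """Register `deferred` as the pending result for `key`."""
        self.pending[key] = []

        def on_done(result):
            # Fan the result out to everyone who called get() in the
            # meantime, then forget the key so later requests recompute.
            for waiter in self.pending.pop(key, []):
                if isinstance(result, failure.Failure):
                    waiter.errback(result)
                else:
                    waiter.callback(result)
            return result

        deferred.addBoth(on_done)
        return deferred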
Example #2
class SyncHandler(object):
    def __init__(self, hs):
        self.store = hs.get_datastore()
        self.notifier = hs.get_notifier()
        self.presence_handler = hs.get_presence_handler()
        self.event_sources = hs.get_event_sources()
        self.clock = hs.get_clock()
        self.response_cache = ResponseCache()

    def wait_for_sync_for_user(self,
                               sync_config,
                               since_token=None,
                               timeout=0,
                               full_state=False):
        """Get the sync for a client if we have new data for it now. Otherwise
        wait for new data to arrive on the server. If the timeout expires, then
        return an empty sync result.
        Returns:
            A Deferred SyncResult.
        """
        result = self.response_cache.get(sync_config.request_key)
        if not result:
            result = self.response_cache.set(
                sync_config.request_key,
                self._wait_for_sync_for_user(sync_config, since_token, timeout,
                                             full_state))
        return result

    @defer.inlineCallbacks
    def _wait_for_sync_for_user(self, sync_config, since_token, timeout,
                                full_state):
        context = LoggingContext.current_context()
        if context:
            if since_token is None:
                context.tag = "initial_sync"
            elif full_state:
                context.tag = "full_state_sync"
            else:
                context.tag = "incremental_sync"

        if timeout == 0 or since_token is None or full_state:
            # we are going to return immediately, so don't bother calling
            # notifier.wait_for_events.
            result = yield self.current_sync_for_user(
                sync_config,
                since_token,
                full_state=full_state,
            )
            defer.returnValue(result)
        else:

            def current_sync_callback(before_token, after_token):
                return self.current_sync_for_user(sync_config, since_token)

            result = yield self.notifier.wait_for_events(
                sync_config.user.to_string(),
                timeout,
                current_sync_callback,
                from_token=since_token,
            )
            defer.returnValue(result)

    def current_sync_for_user(self,
                              sync_config,
                              since_token=None,
                              full_state=False):
        """Get the sync for client needed to match what the server has now.
        Returns:
            A Deferred SyncResult.
        """
        return self.generate_sync_result(sync_config, since_token, full_state)

    @defer.inlineCallbacks
    def push_rules_for_user(self, user):
        user_id = user.to_string()
        rules = yield self.store.get_push_rules_for_user(user_id)
        rules = format_push_rules_for_user(user, rules)
        defer.returnValue(rules)

    @defer.inlineCallbacks
    def ephemeral_by_room(self, sync_config, now_token, since_token=None):
        """Get the ephemeral events for each room the user is in
        Args:
            sync_config (SyncConfig): The flags, filters and user for the sync.
            now_token (StreamToken): Where the server is currently up to.
            since_token (StreamToken): Where the server was when the client
                last synced.
        Returns:
            A tuple of the now StreamToken, updated to reflect which typing
            events are included, and a dict mapping from room_id to a list of
            typing events for that room.
        """

        with Measure(self.clock, "ephemeral_by_room"):
            typing_key = since_token.typing_key if since_token else "0"

            rooms = yield self.store.get_rooms_for_user(
                sync_config.user.to_string())
            room_ids = [room.room_id for room in rooms]

            typing_source = self.event_sources.sources["typing"]
            typing, typing_key = yield typing_source.get_new_events(
                user=sync_config.user,
                from_key=typing_key,
                limit=sync_config.filter_collection.ephemeral_limit(),
                room_ids=room_ids,
                is_guest=sync_config.is_guest,
            )
            now_token = now_token.copy_and_replace("typing_key", typing_key)

            ephemeral_by_room = {}

            for event in typing:
                # we want to exclude the room_id from the event, but modifying the
                # result returned by the event source is poor form (it might cache
                # the object)
                room_id = event["room_id"]
                event_copy = {
                    k: v
                    for (k, v) in event.iteritems() if k != "room_id"
                }
                ephemeral_by_room.setdefault(room_id, []).append(event_copy)

            receipt_key = since_token.receipt_key if since_token else "0"

            receipt_source = self.event_sources.sources["receipt"]
            receipts, receipt_key = yield receipt_source.get_new_events(
                user=sync_config.user,
                from_key=receipt_key,
                limit=sync_config.filter_collection.ephemeral_limit(),
                room_ids=room_ids,
                is_guest=sync_config.is_guest,
            )
            now_token = now_token.copy_and_replace("receipt_key", receipt_key)

            for event in receipts:
                room_id = event["room_id"]
                # exclude room id, as above
                event_copy = {
                    k: v
                    for (k, v) in event.iteritems() if k != "room_id"
                }
                ephemeral_by_room.setdefault(room_id, []).append(event_copy)

        defer.returnValue((now_token, ephemeral_by_room))

    @defer.inlineCallbacks
    def _load_filtered_recents(self,
                               room_id,
                               sync_config,
                               now_token,
                               since_token=None,
                               recents=None,
                               newly_joined_room=False):
        """
        Returns:
            a Deferred TimelineBatch
        """
        with Measure(self.clock, "load_filtered_recents"):
            timeline_limit = sync_config.filter_collection.timeline_limit()

            if recents is None or newly_joined_room or timeline_limit < len(
                    recents):
                limited = True
            else:
                limited = False

            if recents:
                recents = sync_config.filter_collection.filter_room_timeline(
                    recents)
                recents = yield filter_events_for_client(
                    self.store,
                    sync_config.user.to_string(),
                    recents,
                )
            else:
                recents = []

            if not limited:
                defer.returnValue(
                    TimelineBatch(events=recents,
                                  prev_batch=now_token,
                                  limited=False))

            filtering_factor = 2
            load_limit = max(timeline_limit * filtering_factor, 10)
            max_repeat = 5  # Only try a few times per room, otherwise we risk looping forever
            room_key = now_token.room_key
            end_key = room_key

            since_key = None
            if since_token and not newly_joined_room:
                since_key = since_token.room_key

            while limited and len(recents) < timeline_limit and max_repeat:
                events, end_key = yield self.store.get_room_events_stream_for_room(
                    room_id,
                    limit=load_limit + 1,
                    from_key=since_key,
                    to_key=end_key,
                )
                loaded_recents = sync_config.filter_collection.filter_room_timeline(
                    events)
                loaded_recents = yield filter_events_for_client(
                    self.store,
                    sync_config.user.to_string(),
                    loaded_recents,
                )
                loaded_recents.extend(recents)
                recents = loaded_recents

                if len(events) <= load_limit:
                    limited = False
                    break
                max_repeat -= 1

            if len(recents) > timeline_limit:
                limited = True
                recents = recents[-timeline_limit:]
                room_key = recents[0].internal_metadata.before

            prev_batch_token = now_token.copy_and_replace("room_key", room_key)

        defer.returnValue(
            TimelineBatch(events=recents,
                          prev_batch=prev_batch_token,
                          limited=limited or newly_joined_room))

    @defer.inlineCallbacks
    def get_state_after_event(self, event):
        """
        Get the room state after the given event

        Args:
            event(synapse.events.EventBase): event of interest

        Returns:
            A Deferred map from ((type, state_key)->Event)
        """
        state = yield self.store.get_state_for_event(event.event_id)
        if event.is_state():
            state = state.copy()
            state[(event.type, event.state_key)] = event
        defer.returnValue(state)

    @defer.inlineCallbacks
    def get_state_at(self, room_id, stream_position):
        """ Get the room state at a particular stream position

        Args:
            room_id(str): room for which to get state
            stream_position(StreamToken): point at which to get state

        Returns:
            A Deferred map from ((type, state_key)->Event)
        """
        last_events, token = yield self.store.get_recent_events_for_room(
            room_id,
            end_token=stream_position.room_key,
            limit=1,
        )

        if last_events:
            last_event = last_events[-1]
            state = yield self.get_state_after_event(last_event)

        else:
            # no events in this room - so presumably no state
            state = {}
        defer.returnValue(state)

    @defer.inlineCallbacks
    def compute_state_delta(self, room_id, batch, sync_config, since_token,
                            now_token, full_state):
        """ Works out the differnce in state between the start of the timeline
        and the previous sync.

        Args:
            room_id(str):
            batch(synapse.handlers.sync.TimelineBatch): The timeline batch for
                the room that will be sent to the user.
            sync_config(synapse.handlers.sync.SyncConfig):
            since_token(str|None): Token of the end of the previous batch. May
                be None.
            now_token(str): Token of the end of the current batch.
            full_state(bool): Whether to force returning the full state.

        Returns:
             A deferred new event dictionary
        """
        # TODO(mjark) Check if the state events were received by the server
        # after the previous sync, since we need to include those state
        # updates even if they occurred logically before the previous event.
        # TODO(mjark) Check for new redactions in the state events.

        with Measure(self.clock, "compute_state_delta"):
            if full_state:
                if batch:
                    current_state = yield self.store.get_state_for_event(
                        batch.events[-1].event_id)

                    state = yield self.store.get_state_for_event(
                        batch.events[0].event_id)
                else:
                    current_state = yield self.get_state_at(
                        room_id, stream_position=now_token)

                    state = current_state

                timeline_state = {(event.type, event.state_key): event
                                  for event in batch.events
                                  if event.is_state()}

                state = _calculate_state(
                    timeline_contains=timeline_state,
                    timeline_start=state,
                    previous={},
                    current=current_state,
                )
            elif batch.limited:
                state_at_previous_sync = yield self.get_state_at(
                    room_id, stream_position=since_token)

                current_state = yield self.store.get_state_for_event(
                    batch.events[-1].event_id)

                state_at_timeline_start = yield self.store.get_state_for_event(
                    batch.events[0].event_id)

                timeline_state = {(event.type, event.state_key): event
                                  for event in batch.events
                                  if event.is_state()}

                state = _calculate_state(
                    timeline_contains=timeline_state,
                    timeline_start=state_at_timeline_start,
                    previous=state_at_previous_sync,
                    current=current_state,
                )
            else:
                state = {}

            defer.returnValue({
                (e.type, e.state_key): e
                for e in sync_config.filter_collection.filter_room_state(
                    state.values())
            })

    @defer.inlineCallbacks
    def unread_notifs_for_room_id(self, room_id, sync_config):
        with Measure(self.clock, "unread_notifs_for_room_id"):
            last_unread_event_id = yield self.store.get_last_receipt_event_id_for_user(
                user_id=sync_config.user.to_string(),
                room_id=room_id,
                receipt_type="m.read")

            notifs = []
            if last_unread_event_id:
                notifs = yield self.store.get_unread_event_push_actions_by_room_for_user(
                    room_id, sync_config.user.to_string(),
                    last_unread_event_id)
                defer.returnValue(notifs)

            # There is no new information in this period, so your notification
            # count is whatever it was last time.
            defer.returnValue(None)

    @defer.inlineCallbacks
    def generate_sync_result(self,
                             sync_config,
                             since_token=None,
                             full_state=False):
        """Generates a sync result.

        Args:
            sync_config (SyncConfig)
            since_token (StreamToken)
            full_state (bool)

        Returns:
            Deferred(SyncResult)
        """

        # NB: The now_token gets changed by some of the generate_sync_* methods,
        # this is due to some of the underlying streams not supporting the ability
        # to query up to a given point.
        # Always use the `now_token` in `SyncResultBuilder`
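        # (For instance, ephemeral_by_room() hands back a now_token whose
        # typing_key/receipt_key were advanced via copy_and_replace(), and
        # that updated token is what ends up as next_batch.)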
        now_token = yield self.event_sources.get_current_token()

        sync_result_builder = SyncResultBuilder(
            sync_config,
            full_state,
            since_token=since_token,
            now_token=now_token,
        )

        account_data_by_room = yield self._generate_sync_entry_for_account_data(
            sync_result_builder)

        res = yield self._generate_sync_entry_for_rooms(
            sync_result_builder, account_data_by_room)
        newly_joined_rooms, newly_joined_users = res

        yield self._generate_sync_entry_for_presence(sync_result_builder,
                                                     newly_joined_rooms,
                                                     newly_joined_users)

        defer.returnValue(
            SyncResult(
                presence=sync_result_builder.presence,
                account_data=sync_result_builder.account_data,
                joined=sync_result_builder.joined,
                invited=sync_result_builder.invited,
                archived=sync_result_builder.archived,
                next_batch=sync_result_builder.now_token,
            ))

    @defer.inlineCallbacks
    def _generate_sync_entry_for_account_data(self, sync_result_builder):
        """Generates the account data portion of the sync response. Populates
        `sync_result_builder` with the result.

        Args:
            sync_result_builder(SyncResultBuilder)

        Returns:
            Deferred(dict): A dictionary containing the per room account data.
        """
        sync_config = sync_result_builder.sync_config
        user_id = sync_result_builder.sync_config.user.to_string()
        since_token = sync_result_builder.since_token

        if since_token and not sync_result_builder.full_state:
            account_data, account_data_by_room = (
                yield self.store.get_updated_account_data_for_user(
                    user_id,
                    since_token.account_data_key,
                ))

            push_rules_changed = yield self.store.have_push_rules_changed_for_user(
                user_id, int(since_token.push_rules_key))

            if push_rules_changed:
                account_data["m.push_rules"] = yield self.push_rules_for_user(
                    sync_config.user)
        else:
            account_data, account_data_by_room = (
                yield self.store.get_account_data_for_user(
                    sync_config.user.to_string()))

            account_data["m.push_rules"] = yield self.push_rules_for_user(
                sync_config.user)

        account_data_for_user = sync_config.filter_collection.filter_account_data(
            [{
                "type": account_data_type,
                "content": content
            } for account_data_type, content in account_data.items()])

        sync_result_builder.account_data = account_data_for_user

        defer.returnValue(account_data_by_room)

    @defer.inlineCallbacks
    def _generate_sync_entry_for_presence(self, sync_result_builder,
                                          newly_joined_rooms,
                                          newly_joined_users):
        """Generates the presence portion of the sync response. Populates the
        `sync_result_builder` with the result.

        Args:
            sync_result_builder(SyncResultBuilder)
            newly_joined_rooms(list): List of rooms that the user has joined
                since the last sync (or empty if an initial sync)
            newly_joined_users(list): List of users that have joined rooms
                since the last sync (or empty if an initial sync)
        """
        now_token = sync_result_builder.now_token
        sync_config = sync_result_builder.sync_config
        user = sync_result_builder.sync_config.user

        presence_source = self.event_sources.sources["presence"]

        since_token = sync_result_builder.since_token
        if since_token and not sync_result_builder.full_state:
            presence_key = since_token.presence_key
            include_offline = True
        else:
            presence_key = None
            include_offline = False

        presence, presence_key = yield presence_source.get_new_events(
            user=user,
            from_key=presence_key,
            is_guest=sync_config.is_guest,
            include_offline=include_offline,
        )
        sync_result_builder.now_token = now_token.copy_and_replace(
            "presence_key", presence_key)

        extra_users_ids = set(newly_joined_users)
        for room_id in newly_joined_rooms:
            users = yield self.store.get_users_in_room(room_id)
            extra_users_ids.update(users)
        extra_users_ids.discard(user.to_string())

        states = yield self.presence_handler.get_states(
            extra_users_ids,
            as_event=True,
        )
        presence.extend(states)

        # Deduplicate the presence entries so that there's at most one per user
        presence = {p["content"]["user_id"]: p for p in presence}.values()

        presence = sync_config.filter_collection.filter_presence(presence)

        sync_result_builder.presence = presence

    @defer.inlineCallbacks
    def _generate_sync_entry_for_rooms(self, sync_result_builder,
                                       account_data_by_room):
        """Generates the rooms portion of the sync response. Populates the
        `sync_result_builder` with the result.

        Args:
            sync_result_builder(SyncResultBuilder)
            account_data_by_room(dict): Dictionary of per room account data

        Returns:
            Deferred(tuple): Returns a 2-tuple of
            `(newly_joined_rooms, newly_joined_users)`
        """
        user_id = sync_result_builder.sync_config.user.to_string()

        now_token, ephemeral_by_room = yield self.ephemeral_by_room(
            sync_result_builder.sync_config,
            now_token=sync_result_builder.now_token,
            since_token=sync_result_builder.since_token,
        )
        sync_result_builder.now_token = now_token

        ignored_account_data = yield self.store.get_global_account_data_by_type_for_user(
            "m.ignored_user_list",
            user_id=user_id,
        )

        if ignored_account_data:
            ignored_users = ignored_account_data.get("ignored_users",
                                                     {}).keys()
        else:
            ignored_users = frozenset()

        if sync_result_builder.since_token:
            res = yield self._get_rooms_changed(sync_result_builder,
                                                ignored_users)
            room_entries, invited, newly_joined_rooms = res

            tags_by_room = yield self.store.get_updated_tags(
                user_id,
                sync_result_builder.since_token.account_data_key,
            )
        else:
            res = yield self._get_all_rooms(sync_result_builder, ignored_users)
            room_entries, invited, newly_joined_rooms = res

            tags_by_room = yield self.store.get_tags_for_user(user_id)

        def handle_room_entries(room_entry):
            return self._generate_room_entry(
                sync_result_builder,
                ignored_users,
                room_entry,
                ephemeral=ephemeral_by_room.get(room_entry.room_id, []),
                tags=tags_by_room.get(room_entry.room_id),
                account_data=account_data_by_room.get(room_entry.room_id, {}),
                always_include=sync_result_builder.full_state,
            )

        yield concurrently_execute(handle_room_entries, room_entries, 10)

        sync_result_builder.invited.extend(invited)

        # Now we want to get any newly joined users
        newly_joined_users = set()
        if sync_result_builder.since_token:
            for joined_sync in sync_result_builder.joined:
                it = itertools.chain(joined_sync.timeline.events,
                                     joined_sync.state.values())
                for event in it:
                    if event.type == EventTypes.Member:
                        if event.membership == Membership.JOIN:
                            newly_joined_users.add(event.state_key)

        defer.returnValue((newly_joined_rooms, newly_joined_users))

    @defer.inlineCallbacks
    def _get_rooms_changed(self, sync_result_builder, ignored_users):
        """Gets the the changes that have happened since the last sync.

        Args:
            sync_result_builder(SyncResultBuilder)
            ignored_users(set(str)): Set of users ignored by user.

        Returns:
            Deferred(tuple): Returns a tuple of the form:
            `([RoomSyncResultBuilder], [InvitedSyncResult], newly_joined_rooms)`
        """
        user_id = sync_result_builder.sync_config.user.to_string()
        since_token = sync_result_builder.since_token
        now_token = sync_result_builder.now_token
        sync_config = sync_result_builder.sync_config

        assert since_token

        app_service = yield self.store.get_app_service_by_user_id(user_id)
        if app_service:
            rooms = yield self.store.get_app_service_rooms(app_service)
            joined_room_ids = set(r.room_id for r in rooms)
        else:
            rooms = yield self.store.get_rooms_for_user(user_id)
            joined_room_ids = set(r.room_id for r in rooms)

        # Get a list of membership change events that have happened.
        rooms_changed = yield self.store.get_membership_changes_for_user(
            user_id, since_token.room_key, now_token.room_key)

        mem_change_events_by_room_id = {}
        for event in rooms_changed:
            mem_change_events_by_room_id.setdefault(event.room_id,
                                                    []).append(event)

        newly_joined_rooms = []
        room_entries = []
        invited = []
        for room_id, events in mem_change_events_by_room_id.items():
            non_joins = [e for e in events if e.membership != Membership.JOIN]
            has_join = len(non_joins) != len(events)

            # We want to figure out if we joined the room at some point since
            # the last sync (even if we have since left). This is to make sure
            # we do send down the room, and with full state, where necessary
            if room_id in joined_room_ids or has_join:
                old_state = yield self.get_state_at(room_id, since_token)
                old_mem_ev = old_state.get((EventTypes.Member, user_id), None)
                if not old_mem_ev or old_mem_ev.membership != Membership.JOIN:
                    newly_joined_rooms.append(room_id)

                if room_id in joined_room_ids:
                    continue

            if not non_joins:
                continue

            # Only bother if we're still currently invited
            should_invite = non_joins[-1].membership == Membership.INVITE
            if should_invite:
                if non_joins[-1].sender not in ignored_users:
                    room_sync = InvitedSyncResult(room_id,
                                                  invite=non_joins[-1])
                    if room_sync:
                        invited.append(room_sync)

            # Always include leave/ban events. Just take the last one.
            # TODO: How do we handle ban -> leave in same batch?
            leave_events = [
                e for e in non_joins
                if e.membership in (Membership.LEAVE, Membership.BAN)
            ]

            if leave_events:
                leave_event = leave_events[-1]
                leave_stream_token = yield self.store.get_stream_token_for_event(
                    leave_event.event_id)
                leave_token = since_token.copy_and_replace(
                    "room_key", leave_stream_token)

                if since_token and since_token.is_after(leave_token):
                    continue

                room_entries.append(
                    RoomSyncResultBuilder(
                        room_id=room_id,
                        rtype="archived",
                        events=None,
                        newly_joined=room_id in newly_joined_rooms,
                        full_state=False,
                        since_token=since_token,
                        upto_token=leave_token,
                    ))

        timeline_limit = sync_config.filter_collection.timeline_limit()

        # Get all events for rooms we're currently joined to.
        room_to_events = yield self.store.get_room_events_stream_for_rooms(
            room_ids=joined_room_ids,
            from_key=since_token.room_key,
            to_key=now_token.room_key,
            limit=timeline_limit + 1,
        )

        # We loop through all room ids, even if there are no new events, in
        # case there are non-room events that we need to notify about.
        for room_id in joined_room_ids:
            room_entry = room_to_events.get(room_id, None)

            if room_entry:
                events, start_key = room_entry

                prev_batch_token = now_token.copy_and_replace(
                    "room_key", start_key)

                room_entries.append(
                    RoomSyncResultBuilder(
                        room_id=room_id,
                        rtype="joined",
                        events=events,
                        newly_joined=room_id in newly_joined_rooms,
                        full_state=False,
                        since_token=None
                        if room_id in newly_joined_rooms else since_token,
                        upto_token=prev_batch_token,
                    ))
            else:
                room_entries.append(
                    RoomSyncResultBuilder(
                        room_id=room_id,
                        rtype="joined",
                        events=[],
                        newly_joined=room_id in newly_joined_rooms,
                        full_state=False,
                        since_token=since_token,
                        upto_token=since_token,
                    ))

        defer.returnValue((room_entries, invited, newly_joined_rooms))

    @defer.inlineCallbacks
    def _get_all_rooms(self, sync_result_builder, ignored_users):
        """Returns entries for all rooms for the user.

        Args:
            sync_result_builder(SyncResultBuilder)
            ignored_users(set(str)): Set of users ignored by user.

        Returns:
            Deferred(tuple): Returns a tuple of the form:
            `([RoomSyncResultBuilder], [InvitedSyncResult], [])`
        """

        user_id = sync_result_builder.sync_config.user.to_string()
        since_token = sync_result_builder.since_token
        now_token = sync_result_builder.now_token
        sync_config = sync_result_builder.sync_config

        membership_list = (Membership.INVITE, Membership.JOIN,
                           Membership.LEAVE, Membership.BAN)

        room_list = yield self.store.get_rooms_for_user_where_membership_is(
            user_id=user_id, membership_list=membership_list)

        room_entries = []
        invited = []

        for event in room_list:
            if event.membership == Membership.JOIN:
                room_entries.append(
                    RoomSyncResultBuilder(
                        room_id=event.room_id,
                        rtype="joined",
                        events=None,
                        newly_joined=False,
                        full_state=True,
                        since_token=since_token,
                        upto_token=now_token,
                    ))
            elif event.membership == Membership.INVITE:
                if event.sender in ignored_users:
                    continue
                invite = yield self.store.get_event(event.event_id)
                invited.append(
                    InvitedSyncResult(
                        room_id=event.room_id,
                        invite=invite,
                    ))
            elif event.membership in (Membership.LEAVE, Membership.BAN):
                # Always send down rooms we were banned or kicked from.
                if not sync_config.filter_collection.include_leave:
                    if event.membership == Membership.LEAVE:
                        if user_id == event.sender:
                            continue

                leave_token = now_token.copy_and_replace(
                    "room_key", "s%d" % (event.stream_ordering, ))
                room_entries.append(
                    RoomSyncResultBuilder(
                        room_id=event.room_id,
                        rtype="archived",
                        events=None,
                        newly_joined=False,
                        full_state=True,
                        since_token=since_token,
                        upto_token=leave_token,
                    ))

        defer.returnValue((room_entries, invited, []))

    @defer.inlineCallbacks
    def _generate_room_entry(self,
                             sync_result_builder,
                             ignored_users,
                             room_builder,
                             ephemeral,
                             tags,
                             account_data,
                             always_include=False):
        """Populates the `joined` and `archived` section of `sync_result_builder`
        based on the `room_builder`.

        Args:
            sync_result_builder(SyncResultBuilder)
            ignored_users(set(str)): Set of users ignored by user.
            room_builder(RoomSyncResultBuilder)
            ephemeral(list): List of new ephemeral events for room
            tags(list): List of *all* tags for room, or None if there has been
                no change.
            account_data(list): List of new account data for room
            always_include(bool): Always include this room in the sync response,
                even if empty.
        """
        newly_joined = room_builder.newly_joined
        full_state = (room_builder.full_state or newly_joined
                      or sync_result_builder.full_state)
        events = room_builder.events

        # We want to shortcut out as early as possible.
        if not (always_include or account_data or ephemeral or full_state):
            if events == [] and tags is None:
                return

        since_token = sync_result_builder.since_token
        now_token = sync_result_builder.now_token
        sync_config = sync_result_builder.sync_config

        room_id = room_builder.room_id
        since_token = room_builder.since_token
        upto_token = room_builder.upto_token

        batch = yield self._load_filtered_recents(
            room_id,
            sync_config,
            now_token=upto_token,
            since_token=since_token,
            recents=events,
            newly_joined_room=newly_joined,
        )

        account_data_events = []
        if tags is not None:
            account_data_events.append({
                "type": "m.tag",
                "content": {
                    "tags": tags
                },
            })

        for account_data_type, content in account_data.items():
            account_data_events.append({
                "type": account_data_type,
                "content": content,
            })

        account_data = sync_config.filter_collection.filter_room_account_data(
            account_data_events)

        ephemeral = sync_config.filter_collection.filter_room_ephemeral(
            ephemeral)

        if not (always_include or batch or account_data or ephemeral
                or full_state):
            return

        state = yield self.compute_state_delta(room_id,
                                               batch,
                                               sync_config,
                                               since_token,
                                               now_token,
                                               full_state=full_state)

        if room_builder.rtype == "joined":
            unread_notifications = {}
            room_sync = JoinedSyncResult(
                room_id=room_id,
                timeline=batch,
                state=state,
                ephemeral=ephemeral,
                account_data=account_data_events,
                unread_notifications=unread_notifications,
            )

            if room_sync or always_include:
                notifs = yield self.unread_notifs_for_room_id(
                    room_id, sync_config)

                if notifs is not None:
                    unread_notifications["notification_count"] = notifs[
                        "notify_count"]
                    unread_notifications["highlight_count"] = notifs[
                        "highlight_count"]

                sync_result_builder.joined.append(room_sync)
        elif room_builder.rtype == "archived":
            room_sync = ArchivedSyncResult(
                room_id=room_id,
                timeline=batch,
                state=state,
                account_data=account_data,
            )
            if room_sync or always_include:
                sync_result_builder.archived.append(room_sync)
        else:
            raise Exception("Unrecognized rtype: %r" % (room_builder.rtype,))
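
Both examples fan work out over rooms via concurrently_execute(func, args, limit). A minimal sketch of what such a helper can look like (hypothetical; Synapse's real implementation additionally preserves logging contexts and unwraps the first error from the gathered results):

from twisted.internet import defer


def concurrently_execute(func, args, limit):
    """Run func over args with at most `limit` invocations in flight."""
    it = iter(args)  # shared by the workers, so each arg is handed out once

    @defer.inlineCallbacks
    def worker():
        # Each worker keeps pulling the next argument and running func on
        # it until the shared iterator is exhausted; the reactor is single
        # threaded, so next(it) never races.
        for arg in it:
            yield defer.maybeDeferred(func, arg)

    return defer.gatherResults(
        [worker() for _ in range(limit)],
        consumeErrors=True,
    )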
Example #3
class SyncHandler(object):

    def __init__(self, hs):
        self.store = hs.get_datastore()
        self.notifier = hs.get_notifier()
        self.presence_handler = hs.get_presence_handler()
        self.event_sources = hs.get_event_sources()
        self.clock = hs.get_clock()
        self.response_cache = ResponseCache()

    def wait_for_sync_for_user(self, sync_config, since_token=None, timeout=0,
                               full_state=False):
        """Get the sync for a client if we have new data for it now. Otherwise
        wait for new data to arrive on the server. If the timeout expires, then
        return an empty sync result.
        Returns:
            A Deferred SyncResult.
        """
        result = self.response_cache.get(sync_config.request_key)
        if not result:
            result = self.response_cache.set(
                sync_config.request_key,
                self._wait_for_sync_for_user(
                    sync_config, since_token, timeout, full_state
                )
            )
        return result

    @defer.inlineCallbacks
    def _wait_for_sync_for_user(self, sync_config, since_token, timeout,
                                full_state):
        context = LoggingContext.current_context()
        if context:
            if since_token is None:
                context.tag = "initial_sync"
            elif full_state:
                context.tag = "full_state_sync"
            else:
                context.tag = "incremental_sync"

        if timeout == 0 or since_token is None or full_state:
            # we are going to return immediately, so don't bother calling
            # notifier.wait_for_events.
            result = yield self.current_sync_for_user(
                sync_config, since_token, full_state=full_state,
            )
            defer.returnValue(result)
        else:
            def current_sync_callback(before_token, after_token):
                return self.current_sync_for_user(sync_config, since_token)

            result = yield self.notifier.wait_for_events(
                sync_config.user.to_string(), timeout, current_sync_callback,
                from_token=since_token,
            )
            defer.returnValue(result)

    def current_sync_for_user(self, sync_config, since_token=None,
                              full_state=False):
        """Get the sync for client needed to match what the server has now.
        Returns:
            A Deferred SyncResult.
        """
        return self.generate_sync_result(sync_config, since_token, full_state)

    @defer.inlineCallbacks
    def push_rules_for_user(self, user):
        user_id = user.to_string()
        rules = yield self.store.get_push_rules_for_user(user_id)
        rules = format_push_rules_for_user(user, rules)
        defer.returnValue(rules)

    @defer.inlineCallbacks
    def ephemeral_by_room(self, sync_config, now_token, since_token=None):
        """Get the ephemeral events for each room the user is in
        Args:
            sync_config (SyncConfig): The flags, filters and user for the sync.
            now_token (StreamToken): Where the server is currently up to.
            since_token (StreamToken): Where the server was when the client
                last synced.
        Returns:
            A tuple of the now StreamToken, updated to reflect which typing
            events are included, and a dict mapping from room_id to a list of
            typing events for that room.
        """

        with Measure(self.clock, "ephemeral_by_room"):
            typing_key = since_token.typing_key if since_token else "0"

            rooms = yield self.store.get_rooms_for_user(sync_config.user.to_string())
            room_ids = [room.room_id for room in rooms]

            typing_source = self.event_sources.sources["typing"]
            typing, typing_key = yield typing_source.get_new_events(
                user=sync_config.user,
                from_key=typing_key,
                limit=sync_config.filter_collection.ephemeral_limit(),
                room_ids=room_ids,
                is_guest=sync_config.is_guest,
            )
            now_token = now_token.copy_and_replace("typing_key", typing_key)

            ephemeral_by_room = {}

            for event in typing:
                # we want to exclude the room_id from the event, but modifying the
                # result returned by the event source is poor form (it might cache
                # the object)
                room_id = event["room_id"]
                event_copy = {k: v for (k, v) in event.iteritems()
                              if k != "room_id"}
                ephemeral_by_room.setdefault(room_id, []).append(event_copy)

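            # Now do the same for read receipts: fetch anything new since the
            # last sync and advance the receipt key on the now token.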
            receipt_key = since_token.receipt_key if since_token else "0"

            receipt_source = self.event_sources.sources["receipt"]
            receipts, receipt_key = yield receipt_source.get_new_events(
                user=sync_config.user,
                from_key=receipt_key,
                limit=sync_config.filter_collection.ephemeral_limit(),
                room_ids=room_ids,
                is_guest=sync_config.is_guest,
            )
            now_token = now_token.copy_and_replace("receipt_key", receipt_key)

            for event in receipts:
                room_id = event["room_id"]
                # exclude room id, as above
                event_copy = {k: v for (k, v) in event.iteritems()
                              if k != "room_id"}
                ephemeral_by_room.setdefault(room_id, []).append(event_copy)

        defer.returnValue((now_token, ephemeral_by_room))

    @defer.inlineCallbacks
    def _load_filtered_recents(self, room_id, sync_config, now_token,
                               since_token=None, recents=None, newly_joined_room=False):
        """
        Returns:
            a Deferred TimelineBatch
        """
        with Measure(self.clock, "load_filtered_recents"):
            timeline_limit = sync_config.filter_collection.timeline_limit()

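            # Provisionally treat the batch as limited if we have no cached
            # events, the user just joined the room, or we already hold more
            # events than the timeline limit allows.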
            if recents is None or newly_joined_room or timeline_limit < len(recents):
                limited = True
            else:
                limited = False

            if recents:
                recents = sync_config.filter_collection.filter_room_timeline(recents)
                recents = yield filter_events_for_client(
                    self.store,
                    sync_config.user.to_string(),
                    recents,
                )
            else:
                recents = []

            if not limited:
                defer.returnValue(TimelineBatch(
                    events=recents,
                    prev_batch=now_token,
                    limited=False
                ))

            filtering_factor = 2
            load_limit = max(timeline_limit * filtering_factor, 10)
            max_repeat = 5  # Only try a few times per room before giving up
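            # The loop below pages backwards through the room's event stream:
            # each pass loads up to `load_limit` events, filters them, and
            # prepends the survivors, until the timeline limit is reached or
            # max_repeat passes have been made.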
            room_key = now_token.room_key
            end_key = room_key

            since_key = None
            if since_token and not newly_joined_room:
                since_key = since_token.room_key

            while limited and len(recents) < timeline_limit and max_repeat:
                events, end_key = yield self.store.get_room_events_stream_for_room(
                    room_id,
                    limit=load_limit + 1,
                    from_key=since_key,
                    to_key=end_key,
                )
                loaded_recents = sync_config.filter_collection.filter_room_timeline(
                    events
                )
                loaded_recents = yield filter_events_for_client(
                    self.store,
                    sync_config.user.to_string(),
                    loaded_recents,
                )
                loaded_recents.extend(recents)
                recents = loaded_recents

                if len(events) <= load_limit:
                    limited = False
                    break
                max_repeat -= 1

            if len(recents) > timeline_limit:
                limited = True
                recents = recents[-timeline_limit:]
                room_key = recents[0].internal_metadata.before

            prev_batch_token = now_token.copy_and_replace(
                "room_key", room_key
            )

        defer.returnValue(TimelineBatch(
            events=recents,
            prev_batch=prev_batch_token,
            limited=limited or newly_joined_room
        ))

    @defer.inlineCallbacks
    def get_state_after_event(self, event):
        """
        Get the room state after the given event

        Args:
            event(synapse.events.EventBase): event of interest

        Returns:
            A Deferred map from ((type, state_key)->Event)
        """
        state = yield self.store.get_state_for_event(event.event_id)
        if event.is_state():
            state = state.copy()
            state[(event.type, event.state_key)] = event
        defer.returnValue(state)

    @defer.inlineCallbacks
    def get_state_at(self, room_id, stream_position):
        """ Get the room state at a particular stream position

        Args:
            room_id(str): room for which to get state
            stream_position(StreamToken): point at which to get state

        Returns:
            A Deferred map from ((type, state_key)->Event)
        """
        last_events, token = yield self.store.get_recent_events_for_room(
            room_id, end_token=stream_position.room_key, limit=1,
        )

        if last_events:
            last_event = last_events[-1]
            state = yield self.get_state_after_event(last_event)

        else:
            # no events in this room - so presumably no state
            state = {}
        defer.returnValue(state)

    @defer.inlineCallbacks
    def compute_state_delta(self, room_id, batch, sync_config, since_token, now_token,
                            full_state):
        """ Works out the differnce in state between the start of the timeline
        and the previous sync.

        Args:
            room_id(str):
            batch(synapse.handlers.sync.TimelineBatch): The timeline batch for
                the room that will be sent to the user.
            sync_config(synapse.handlers.sync.SyncConfig):
            since_token(str|None): Token of the end of the previous batch. May
                be None.
            now_token(str): Token of the end of the current batch.
            full_state(bool): Whether to force returning the full state.

        Returns:
             A deferred new event dictionary
        """
        # TODO(mjark) Check if the state events were received by the server
        # after the previous sync, since we need to include those state
        # updates even if they occurred logically before the previous event.
        # TODO(mjark) Check for new redactions in the state events.
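        # There are three cases here:
        #   1. full_state: send down the state at the start of the timeline,
        #      resolved against the current state.
        #   2. batch.limited: a "gappy" incremental sync; send down the state
        #      changes between the previous sync and the start of the
        #      timeline.
        #   3. otherwise: nothing to do, since any state changes are already
        #      in the timeline events themselves.
        #
        # As a rough sketch of `_calculate_state` (defined elsewhere in this
        # module), the state it returns is approximately:
        #     ((timeline_start | current) - previous) - timeline_contains
        # i.e. state the client hasn't seen and won't see in the timeline.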

        with Measure(self.clock, "compute_state_delta"):
            if full_state:
                if batch:
                    current_state = yield self.store.get_state_for_event(
                        batch.events[-1].event_id
                    )

                    state = yield self.store.get_state_for_event(
                        batch.events[0].event_id
                    )
                else:
                    current_state = yield self.get_state_at(
                        room_id, stream_position=now_token
                    )

                    state = current_state

                timeline_state = {
                    (event.type, event.state_key): event
                    for event in batch.events if event.is_state()
                }

                state = _calculate_state(
                    timeline_contains=timeline_state,
                    timeline_start=state,
                    previous={},
                    current=current_state,
                )
            elif batch.limited:
                state_at_previous_sync = yield self.get_state_at(
                    room_id, stream_position=since_token
                )

                current_state = yield self.store.get_state_for_event(
                    batch.events[-1].event_id
                )

                state_at_timeline_start = yield self.store.get_state_for_event(
                    batch.events[0].event_id
                )

                timeline_state = {
                    (event.type, event.state_key): event
                    for event in batch.events if event.is_state()
                }

                state = _calculate_state(
                    timeline_contains=timeline_state,
                    timeline_start=state_at_timeline_start,
                    previous=state_at_previous_sync,
                    current=current_state,
                )
            else:
                state = {}

            defer.returnValue({
                (e.type, e.state_key): e
                for e in sync_config.filter_collection.filter_room_state(state.values())
            })

    @defer.inlineCallbacks
    def unread_notifs_for_room_id(self, room_id, sync_config):
        with Measure(self.clock, "unread_notifs_for_room_id"):
            last_unread_event_id = yield self.store.get_last_receipt_event_id_for_user(
                user_id=sync_config.user.to_string(),
                room_id=room_id,
                receipt_type="m.read"
            )

            notifs = []
            if last_unread_event_id:
                notifs = yield self.store.get_unread_event_push_actions_by_room_for_user(
                    room_id, sync_config.user.to_string(), last_unread_event_id
                )
                defer.returnValue(notifs)

            # There is no new information in this period, so your notification
            # count is whatever it was last time.
            defer.returnValue(None)

    @defer.inlineCallbacks
    def generate_sync_result(self, sync_config, since_token=None, full_state=False):
        """Generates a sync result.

        Args:
            sync_config (SyncConfig)
            since_token (StreamToken)
            full_state (bool)

        Returns:
            Deferred(SyncResult)
        """

        # NB: The now_token gets changed by some of the generate_sync_* methods,
        # this is due to some of the underlying streams not supporting the ability
        # to query up to a given point.
        # Always use the `now_token` in `SyncResultBuilder`
        now_token = yield self.event_sources.get_current_token()

        sync_result_builder = SyncResultBuilder(
            sync_config, full_state,
            since_token=since_token,
            now_token=now_token,
        )

        account_data_by_room = yield self._generate_sync_entry_for_account_data(
            sync_result_builder
        )

        res = yield self._generate_sync_entry_for_rooms(
            sync_result_builder, account_data_by_room
        )
        newly_joined_rooms, newly_joined_users = res

        yield self._generate_sync_entry_for_presence(
            sync_result_builder, newly_joined_rooms, newly_joined_users
        )

        defer.returnValue(SyncResult(
            presence=sync_result_builder.presence,
            account_data=sync_result_builder.account_data,
            joined=sync_result_builder.joined,
            invited=sync_result_builder.invited,
            archived=sync_result_builder.archived,
            next_batch=sync_result_builder.now_token,
        ))

    @defer.inlineCallbacks
    def _generate_sync_entry_for_account_data(self, sync_result_builder):
        """Generates the account data portion of the sync response. Populates
        `sync_result_builder` with the result.

        Args:
            sync_result_builder(SyncResultBuilder)

        Returns:
            Deferred(dict): A dictionary containing the per room account data.
        """
        sync_config = sync_result_builder.sync_config
        user_id = sync_result_builder.sync_config.user.to_string()
        since_token = sync_result_builder.since_token

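        # For an incremental sync, only fetch the account data (and push
        # rules) that changed since the last sync; otherwise fetch everything.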
        if since_token and not sync_result_builder.full_state:
            account_data, account_data_by_room = (
                yield self.store.get_updated_account_data_for_user(
                    user_id,
                    since_token.account_data_key,
                )
            )

            push_rules_changed = yield self.store.have_push_rules_changed_for_user(
                user_id, int(since_token.push_rules_key)
            )

            if push_rules_changed:
                account_data["m.push_rules"] = yield self.push_rules_for_user(
                    sync_config.user
                )
        else:
            account_data, account_data_by_room = (
                yield self.store.get_account_data_for_user(
                    sync_config.user.to_string()
                )
            )

            account_data['m.push_rules'] = yield self.push_rules_for_user(
                sync_config.user
            )

        account_data_for_user = sync_config.filter_collection.filter_account_data([
            {"type": account_data_type, "content": content}
            for account_data_type, content in account_data.items()
        ])

        sync_result_builder.account_data = account_data_for_user

        defer.returnValue(account_data_by_room)

    @defer.inlineCallbacks
    def _generate_sync_entry_for_presence(self, sync_result_builder, newly_joined_rooms,
                                          newly_joined_users):
        """Generates the presence portion of the sync response. Populates the
        `sync_result_builder` with the result.

        Args:
            sync_result_builder(SyncResultBuilder)
            newly_joined_rooms(list): List of rooms that the user has joined
                since the last sync (or empty if an initial sync)
            newly_joined_users(list): List of users that have joined rooms
                since the last sync (or empty if an initial sync)
        """
        now_token = sync_result_builder.now_token
        sync_config = sync_result_builder.sync_config
        user = sync_result_builder.sync_config.user

        presence_source = self.event_sources.sources["presence"]

        since_token = sync_result_builder.since_token
        if since_token and not sync_result_builder.full_state:
            presence_key = since_token.presence_key
            include_offline = True
        else:
            presence_key = None
            include_offline = False

        presence, presence_key = yield presence_source.get_new_events(
            user=user,
            from_key=presence_key,
            is_guest=sync_config.is_guest,
            include_offline=include_offline,
        )
        sync_result_builder.now_token = now_token.copy_and_replace(
            "presence_key", presence_key
        )

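        # As well as presence that changed on the stream, include the presence
        # of everyone in any newly joined room, so the client gets an initial
        # state for those users.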
        extra_users_ids = set(newly_joined_users)
        for room_id in newly_joined_rooms:
            users = yield self.store.get_users_in_room(room_id)
            extra_users_ids.update(users)
        extra_users_ids.discard(user.to_string())

        states = yield self.presence_handler.get_states(
            extra_users_ids,
            as_event=True,
        )
        presence.extend(states)

        # Deduplicate the presence entries so that there's at most one per user
        presence = {p["content"]["user_id"]: p for p in presence}.values()

        presence = sync_config.filter_collection.filter_presence(
            presence
        )

        sync_result_builder.presence = presence

    @defer.inlineCallbacks
    def _generate_sync_entry_for_rooms(self, sync_result_builder, account_data_by_room):
        """Generates the rooms portion of the sync response. Populates the
        `sync_result_builder` with the result.

        Args:
            sync_result_builder(SyncResultBuilder)
            account_data_by_room(dict): Dictionary of per room account data

        Returns:
            Deferred(tuple): Returns a 2-tuple of
            `(newly_joined_rooms, newly_joined_users)`
        """
        user_id = sync_result_builder.sync_config.user.to_string()

        now_token, ephemeral_by_room = yield self.ephemeral_by_room(
            sync_result_builder.sync_config,
            now_token=sync_result_builder.now_token,
            since_token=sync_result_builder.since_token,
        )
        sync_result_builder.now_token = now_token

        ignored_account_data = yield self.store.get_global_account_data_by_type_for_user(
            "m.ignored_user_list", user_id=user_id,
        )

        if ignored_account_data:
            ignored_users = ignored_account_data.get("ignored_users", {}).keys()
        else:
            ignored_users = frozenset()

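        # An incremental sync only needs the rooms (and tags) that changed
        # since the last sync; an initial sync needs all of them.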
        if sync_result_builder.since_token:
            res = yield self._get_rooms_changed(sync_result_builder, ignored_users)
            room_entries, invited, newly_joined_rooms = res

            tags_by_room = yield self.store.get_updated_tags(
                user_id,
                sync_result_builder.since_token.account_data_key,
            )
        else:
            res = yield self._get_all_rooms(sync_result_builder, ignored_users)
            room_entries, invited, newly_joined_rooms = res

            tags_by_room = yield self.store.get_tags_for_user(user_id)

        def handle_room_entries(room_entry):
            return self._generate_room_entry(
                sync_result_builder,
                ignored_users,
                room_entry,
                ephemeral=ephemeral_by_room.get(room_entry.room_id, []),
                tags=tags_by_room.get(room_entry.room_id),
                account_data=account_data_by_room.get(room_entry.room_id, {}),
                always_include=sync_result_builder.full_state,
            )

        yield concurrently_execute(handle_room_entries, room_entries, 10)

        sync_result_builder.invited.extend(invited)

        # Now we want to get any newly joined users
        newly_joined_users = set()
        if sync_result_builder.since_token:
            for joined_sync in sync_result_builder.joined:
                it = itertools.chain(
                    joined_sync.timeline.events, joined_sync.state.values()
                )
                for event in it:
                    if event.type == EventTypes.Member:
                        if event.membership == Membership.JOIN:
                            newly_joined_users.add(event.state_key)

        defer.returnValue((newly_joined_rooms, newly_joined_users))

    @defer.inlineCallbacks
    def _get_rooms_changed(self, sync_result_builder, ignored_users):
        """Gets the the changes that have happened since the last sync.

        Args:
            sync_result_builder(SyncResultBuilder)
            ignored_users(set(str)): Set of users ignored by user.

        Returns:
            Deferred(tuple): Returns a tuple of the form:
            `([RoomSyncResultBuilder], [InvitedSyncResult], newly_joined_rooms)`
        """
        user_id = sync_result_builder.sync_config.user.to_string()
        since_token = sync_result_builder.since_token
        now_token = sync_result_builder.now_token
        sync_config = sync_result_builder.sync_config

        assert since_token

        app_service = yield self.store.get_app_service_by_user_id(user_id)
        if app_service:
            rooms = yield self.store.get_app_service_rooms(app_service)
            joined_room_ids = set(r.room_id for r in rooms)
        else:
            rooms = yield self.store.get_rooms_for_user(user_id)
            joined_room_ids = set(r.room_id for r in rooms)

        # Get a list of membership change events that have happened.
        rooms_changed = yield self.store.get_membership_changes_for_user(
            user_id, since_token.room_key, now_token.room_key
        )

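        # Group the membership changes by room so each room can be processed
        # in a single pass below.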
        mem_change_events_by_room_id = {}
        for event in rooms_changed:
            mem_change_events_by_room_id.setdefault(event.room_id, []).append(event)

        newly_joined_rooms = []
        room_entries = []
        invited = []
        for room_id, events in mem_change_events_by_room_id.items():
            non_joins = [e for e in events if e.membership != Membership.JOIN]
            has_join = len(non_joins) != len(events)

            # We want to figure out if we joined the room at some point since
            # the last sync (even if we have since left). This is to make sure
            # we do send down the room, and with full state, where necessary
            if room_id in joined_room_ids or has_join:
                old_state = yield self.get_state_at(room_id, since_token)
                old_mem_ev = old_state.get((EventTypes.Member, user_id), None)
                if not old_mem_ev or old_mem_ev.membership != Membership.JOIN:
                    newly_joined_rooms.append(room_id)

                if room_id in joined_room_ids:
                    continue

            if not non_joins:
                continue

            # Only bother if we're still currently invited
            should_invite = non_joins[-1].membership == Membership.INVITE
            if should_invite:
                # Check the sender of the invite itself; `event` at this point
                # would be a stale leftover from the loop above.
                if non_joins[-1].sender not in ignored_users:
                    room_sync = InvitedSyncResult(room_id, invite=non_joins[-1])
                    if room_sync:
                        invited.append(room_sync)

            # Always include leave/ban events. Just take the last one.
            # TODO: How do we handle ban -> leave in same batch?
            leave_events = [
                e for e in non_joins
                if e.membership in (Membership.LEAVE, Membership.BAN)
            ]

            if leave_events:
                leave_event = leave_events[-1]
                leave_stream_token = yield self.store.get_stream_token_for_event(
                    leave_event.event_id
                )
                leave_token = since_token.copy_and_replace(
                    "room_key", leave_stream_token
                )

                if since_token and since_token.is_after(leave_token):
                    continue

                room_entries.append(RoomSyncResultBuilder(
                    room_id=room_id,
                    rtype="archived",
                    events=None,
                    newly_joined=room_id in newly_joined_rooms,
                    full_state=False,
                    since_token=since_token,
                    upto_token=leave_token,
                ))

        timeline_limit = sync_config.filter_collection.timeline_limit()

        # Get all events for rooms we're currently joined to.
        room_to_events = yield self.store.get_room_events_stream_for_rooms(
            room_ids=joined_room_ids,
            from_key=since_token.room_key,
            to_key=now_token.room_key,
            limit=timeline_limit + 1,
        )

        # We loop through all room ids, even if there are no new events, in case
        # there are non-room events that we need to notify about.
        for room_id in joined_room_ids:
            room_entry = room_to_events.get(room_id, None)

            if room_entry:
                events, start_key = room_entry

                prev_batch_token = now_token.copy_and_replace("room_key", start_key)

                room_entries.append(RoomSyncResultBuilder(
                    room_id=room_id,
                    rtype="joined",
                    events=events,
                    newly_joined=room_id in newly_joined_rooms,
                    full_state=False,
                    since_token=None if room_id in newly_joined_rooms else since_token,
                    upto_token=prev_batch_token,
                ))
            else:
                room_entries.append(RoomSyncResultBuilder(
                    room_id=room_id,
                    rtype="joined",
                    events=[],
                    newly_joined=room_id in newly_joined_rooms,
                    full_state=False,
                    since_token=since_token,
                    upto_token=since_token,
                ))

        defer.returnValue((room_entries, invited, newly_joined_rooms))

    @defer.inlineCallbacks
    def _get_all_rooms(self, sync_result_builder, ignored_users):
        """Returns entries for all rooms for the user.

        Args:
            sync_result_builder(SyncResultBuilder)
            ignored_users(set(str)): Set of users ignored by user.

        Returns:
            Deferred(tuple): Returns a tuple of the form:
            `([RoomSyncResultBuilder], [InvitedSyncResult], [])`
        """

        user_id = sync_result_builder.sync_config.user.to_string()
        since_token = sync_result_builder.since_token
        now_token = sync_result_builder.now_token
        sync_config = sync_result_builder.sync_config

        membership_list = (
            Membership.INVITE, Membership.JOIN, Membership.LEAVE, Membership.BAN
        )

        room_list = yield self.store.get_rooms_for_user_where_membership_is(
            user_id=user_id,
            membership_list=membership_list
        )

        room_entries = []
        invited = []

        for event in room_list:
            if event.membership == Membership.JOIN:
                room_entries.append(RoomSyncResultBuilder(
                    room_id=event.room_id,
                    rtype="joined",
                    events=None,
                    newly_joined=False,
                    full_state=True,
                    since_token=since_token,
                    upto_token=now_token,
                ))
            elif event.membership == Membership.INVITE:
                if event.sender in ignored_users:
                    continue
                invite = yield self.store.get_event(event.event_id)
                invited.append(InvitedSyncResult(
                    room_id=event.room_id,
                    invite=invite,
                ))
            elif event.membership in (Membership.LEAVE, Membership.BAN):
                # Always send down rooms we were banned or kicked from.
                if not sync_config.filter_collection.include_leave:
                    if event.membership == Membership.LEAVE:
                        if user_id == event.sender:
                            continue

                leave_token = now_token.copy_and_replace(
                    "room_key", "s%d" % (event.stream_ordering,)
                )
                room_entries.append(RoomSyncResultBuilder(
                    room_id=event.room_id,
                    rtype="archived",
                    events=None,
                    newly_joined=False,
                    full_state=True,
                    since_token=since_token,
                    upto_token=leave_token,
                ))

        defer.returnValue((room_entries, invited, []))

    @defer.inlineCallbacks
    def _generate_room_entry(self, sync_result_builder, ignored_users,
                             room_builder, ephemeral, tags, account_data,
                             always_include=False):
        """Populates the `joined` and `archived` section of `sync_result_builder`
        based on the `room_builder`.

        Args:
            sync_result_builder(SyncResultBuilder)
            ignored_users(set(str)): Set of users ignored by user.
            room_builder(RoomSyncResultBuilder)
            ephemeral(list): List of new ephemeral events for room
            tags(list): List of *all* tags for room, or None if there has been
                no change.
            account_data(list): List of new account data for room
            always_include(bool): Always include this room in the sync response,
                even if empty.
        """
        newly_joined = room_builder.newly_joined
        full_state = (
            room_builder.full_state
            or newly_joined
            or sync_result_builder.full_state
        )
        events = room_builder.events

        # We want to shortcut out as early as possible.
        if not (always_include or account_data or ephemeral or full_state):
            if events == [] and tags is None:
                return

        since_token = sync_result_builder.since_token
        now_token = sync_result_builder.now_token
        sync_config = sync_result_builder.sync_config

        room_id = room_builder.room_id
        since_token = room_builder.since_token
        upto_token = room_builder.upto_token

        batch = yield self._load_filtered_recents(
            room_id, sync_config,
            now_token=upto_token,
            since_token=since_token,
            recents=events,
            newly_joined_room=newly_joined,
        )

        account_data_events = []
        if tags is not None:
            account_data_events.append({
                "type": "m.tag",
                "content": {"tags": tags},
            })

        for account_data_type, content in account_data.items():
            account_data_events.append({
                "type": account_data_type,
                "content": content,
            })

        account_data = sync_config.filter_collection.filter_room_account_data(
            account_data_events
        )

        ephemeral = sync_config.filter_collection.filter_room_ephemeral(ephemeral)

        if not (always_include or batch or account_data or ephemeral or full_state):
            return

        state = yield self.compute_state_delta(
            room_id, batch, sync_config, since_token, now_token,
            full_state=full_state
        )

        if room_builder.rtype == "joined":
            unread_notifications = {}
            room_sync = JoinedSyncResult(
                room_id=room_id,
                timeline=batch,
                state=state,
                ephemeral=ephemeral,
                account_data=account_data,  # the filtered list computed above
                unread_notifications=unread_notifications,
            )

            if room_sync or always_include:
                notifs = yield self.unread_notifs_for_room_id(
                    room_id, sync_config
                )

                if notifs is not None:
                    unread_notifications["notification_count"] = notifs["notify_count"]
                    unread_notifications["highlight_count"] = notifs["highlight_count"]

                sync_result_builder.joined.append(room_sync)
        elif room_builder.rtype == "archived":
            room_sync = ArchivedSyncResult(
                room_id=room_id,
                timeline=batch,
                state=state,
                account_data=account_data,
            )
            if room_sync or always_include:
                sync_result_builder.archived.append(room_sync)
        else:
            raise Exception("Unrecognized rtype: %r", room_builder.rtype)
Example #4
class FederationServer(FederationBase):
    def __init__(self, hs):
        super(FederationServer, self).__init__(hs)

        self.auth = hs.get_auth()
        self.handler = hs.get_handlers().federation_handler

        self._server_linearizer = async.Linearizer("fed_server")
        self._transaction_linearizer = async.Linearizer("fed_txn_handler")

        self.transaction_actions = TransactionActions(self.store)

        self.registry = hs.get_federation_registry()

        # We cache responses to state queries, as they take a while and often
        # come in waves.
        self._state_resp_cache = ResponseCache(hs, timeout_ms=30000)

    @defer.inlineCallbacks
    @log_function
    def on_backfill_request(self, origin, room_id, versions, limit):
        with (yield self._server_linearizer.queue((origin, room_id))):
            pdus = yield self.handler.on_backfill_request(
                origin, room_id, versions, limit)

            res = self._transaction_from_pdus(pdus).get_dict()

        defer.returnValue((200, res))

    @defer.inlineCallbacks
    @log_function
    def on_incoming_transaction(self, transaction_data):
        # keep this as early as possible to make the calculated origin ts as
        # accurate as possible.
        request_time = self._clock.time_msec()

        transaction = Transaction(**transaction_data)

        if not transaction.transaction_id:
            raise Exception("Transaction missing transaction_id")
        if not transaction.origin:
            raise Exception("Transaction missing origin")

        logger.debug("[%s] Got transaction", transaction.transaction_id)

        # use a linearizer to ensure that we don't process the same transaction
        # multiple times in parallel.
        with (yield self._transaction_linearizer.queue(
            (transaction.origin, transaction.transaction_id), )):
            result = yield self._handle_incoming_transaction(
                transaction,
                request_time,
            )

        defer.returnValue(result)

    @defer.inlineCallbacks
    def _handle_incoming_transaction(self, transaction, request_time):
        """ Process an incoming transaction and return the HTTP response

        Args:
            transaction (Transaction): incoming transaction
            request_time (int): timestamp that the HTTP request arrived at

        Returns:
            Deferred[(int, object)]: http response code and body
        """
        response = yield self.transaction_actions.have_responded(transaction)

        if response:
            logger.debug("[%s] We've already responded to this request",
                         transaction.transaction_id)
            defer.returnValue(response)
            return

        logger.debug("[%s] Transaction is new", transaction.transaction_id)

        received_pdus_counter.inc_by(len(transaction.pdus))

        pdus_by_room = {}

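        # Convert the relative "age" supplied by the origin into an absolute
        # "age_ts", based on when this request arrived.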
        for p in transaction.pdus:
            if "unsigned" in p:
                unsigned = p["unsigned"]
                if "age" in unsigned:
                    p["age"] = unsigned["age"]
            if "age" in p:
                p["age_ts"] = request_time - int(p["age"])
                del p["age"]

            event = event_from_pdu_json(p)
            room_id = event.room_id
            pdus_by_room.setdefault(room_id, []).append(event)

        pdu_results = {}

        # we can process different rooms in parallel (which is useful if they
        # require callouts to other servers to fetch missing events), but
        # impose a limit to avoid going too crazy with ram/cpu.
        @defer.inlineCallbacks
        def process_pdus_for_room(room_id):
            logger.debug("Processing PDUs for %s", room_id)
            for pdu in pdus_by_room[room_id]:
                event_id = pdu.event_id
                try:
                    yield self._handle_received_pdu(transaction.origin, pdu)
                    pdu_results[event_id] = {}
                except FederationError as e:
                    logger.warn("Error handling PDU %s: %s", event_id, e)
                    pdu_results[event_id] = {"error": str(e)}
                except Exception as e:
                    pdu_results[event_id] = {"error": str(e)}
                    logger.exception("Failed to handle PDU %s", event_id)

        yield async.concurrently_execute(
            process_pdus_for_room,
            pdus_by_room.keys(),
            TRANSACTION_CONCURRENCY_LIMIT,
        )

        if hasattr(transaction, "edus"):
            for edu in (Edu(**x) for x in transaction.edus):
                yield self.received_edu(transaction.origin, edu.edu_type,
                                        edu.content)

        pdu_failures = getattr(transaction, "pdu_failures", [])
        for failure in pdu_failures:
            logger.info("Got failure %r", failure)

        response = {
            "pdus": pdu_results,
        }

        logger.debug("Returning: %s", str(response))

        yield self.transaction_actions.set_response(transaction, 200, response)
        defer.returnValue((200, response))

    @defer.inlineCallbacks
    def received_edu(self, origin, edu_type, content):
        received_edus_counter.inc()
        yield self.registry.on_edu(edu_type, origin, content)

    @defer.inlineCallbacks
    @log_function
    def on_context_state_request(self, origin, room_id, event_id):
        if not event_id:
            raise NotImplementedError("Specify an event")

        in_room = yield self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")

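        # Compute the response at most once per (room_id, event_id):
        # concurrent requests for the same key share the cached Deferred.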
        result = self._state_resp_cache.get((room_id, event_id))
        if not result:
            with (yield self._server_linearizer.queue((origin, room_id))):
                d = self._state_resp_cache.set(
                    (room_id, event_id),
                    preserve_fn(self._on_context_state_request_compute)(
                        room_id, event_id))
                resp = yield make_deferred_yieldable(d)
        else:
            resp = yield make_deferred_yieldable(result)

        defer.returnValue((200, resp))

    @defer.inlineCallbacks
    def on_state_ids_request(self, origin, room_id, event_id):
        if not event_id:
            raise NotImplementedError("Specify an event")

        in_room = yield self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")

        state_ids = yield self.handler.get_state_ids_for_pdu(
            room_id,
            event_id,
        )
        auth_chain_ids = yield self.store.get_auth_chain_ids(state_ids)

        defer.returnValue((200, {
            "pdu_ids": state_ids,
            "auth_chain_ids": auth_chain_ids,
        }))

    @defer.inlineCallbacks
    def _on_context_state_request_compute(self, room_id, event_id):
        pdus = yield self.handler.get_state_for_pdu(
            room_id,
            event_id,
        )
        auth_chain = yield self.store.get_auth_chain(
            [pdu.event_id for pdu in pdus])

        for event in auth_chain:
            # We sign these again because there was a bug where we
            # incorrectly signed things the first time round
            if self.hs.is_mine_id(event.event_id):
                event.signatures.update(
                    compute_event_signature(event, self.hs.hostname,
                                            self.hs.config.signing_key[0]))

        defer.returnValue({
            "pdus": [pdu.get_pdu_json() for pdu in pdus],
            "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
        })

    @defer.inlineCallbacks
    @log_function
    def on_pdu_request(self, origin, event_id):
        pdu = yield self._get_persisted_pdu(origin, event_id)

        if pdu:
            defer.returnValue(
                (200, self._transaction_from_pdus([pdu]).get_dict()))
        else:
            defer.returnValue((404, ""))

    @defer.inlineCallbacks
    @log_function
    def on_pull_request(self, origin, versions):
        raise NotImplementedError("Pull transactions not implemented")

    @defer.inlineCallbacks
    def on_query_request(self, query_type, args):
        received_queries_counter.inc(query_type)
        resp = yield self.registry.on_query(query_type, args)
        defer.returnValue((200, resp))

    @defer.inlineCallbacks
    def on_make_join_request(self, room_id, user_id):
        pdu = yield self.handler.on_make_join_request(room_id, user_id)
        time_now = self._clock.time_msec()
        defer.returnValue({"event": pdu.get_pdu_json(time_now)})

    @defer.inlineCallbacks
    def on_invite_request(self, origin, content):
        pdu = event_from_pdu_json(content)
        ret_pdu = yield self.handler.on_invite_request(origin, pdu)
        time_now = self._clock.time_msec()
        defer.returnValue((200, {"event": ret_pdu.get_pdu_json(time_now)}))

    @defer.inlineCallbacks
    def on_send_join_request(self, origin, content):
        logger.debug("on_send_join_request: content: %s", content)
        pdu = event_from_pdu_json(content)
        logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)
        res_pdus = yield self.handler.on_send_join_request(origin, pdu)
        time_now = self._clock.time_msec()
        defer.returnValue((200, {
            "state": [p.get_pdu_json(time_now) for p in res_pdus["state"]],
            "auth_chain":
            [p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]],
        }))

    @defer.inlineCallbacks
    def on_make_leave_request(self, room_id, user_id):
        pdu = yield self.handler.on_make_leave_request(room_id, user_id)
        time_now = self._clock.time_msec()
        defer.returnValue({"event": pdu.get_pdu_json(time_now)})

    @defer.inlineCallbacks
    def on_send_leave_request(self, origin, content):
        logger.debug("on_send_leave_request: content: %s", content)
        pdu = event_from_pdu_json(content)
        logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)
        yield self.handler.on_send_leave_request(origin, pdu)
        defer.returnValue((200, {}))

    @defer.inlineCallbacks
    def on_event_auth(self, origin, room_id, event_id):
        with (yield self._server_linearizer.queue((origin, room_id))):
            time_now = self._clock.time_msec()
            auth_pdus = yield self.handler.on_event_auth(event_id)
            res = {
                "auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus],
            }
        defer.returnValue((200, res))

    @defer.inlineCallbacks
    def on_query_auth_request(self, origin, content, room_id, event_id):
        """
        Content is a dict with keys::
            auth_chain (list): A list of events that give the auth chain.
            missing (list): A list of event_ids indicating what the other
              side (`origin`) thinks we're missing.
            rejects (dict): A mapping from event_id to a 2-tuple of reason
              string and a proof (or None) of why the event was rejected.
              The keys of this dict give the list of events the `origin` has
              rejected.

        Args:
            origin (str)
            content (dict)
            room_id (str)
            event_id (str)

        Returns:
            Deferred: Results in `dict` with the same format as `content`
        """
        with (yield self._server_linearizer.queue((origin, room_id))):
            auth_chain = [
                event_from_pdu_json(e) for e in content["auth_chain"]
            ]

            signed_auth = yield self._check_sigs_and_hash_and_fetch(
                origin, auth_chain, outlier=True)

            ret = yield self.handler.on_query_auth(
                origin,
                event_id,
                signed_auth,
                content.get("rejects", []),
                content.get("missing", []),
            )

            time_now = self._clock.time_msec()
            send_content = {
                "auth_chain":
                [e.get_pdu_json(time_now) for e in ret["auth_chain"]],
                "rejects": ret.get("rejects", []),
                "missing": ret.get("missing", []),
            }

        defer.returnValue((200, send_content))

    @log_function
    def on_query_client_keys(self, origin, content):
        return self.on_query_request("client_keys", content)

    def on_query_user_devices(self, origin, user_id):
        return self.on_query_request("user_devices", user_id)

    @defer.inlineCallbacks
    @log_function
    def on_claim_client_keys(self, origin, content):
        query = []
        for user_id, device_keys in content.get("one_time_keys", {}).items():
            for device_id, algorithm in device_keys.items():
                query.append((user_id, device_id, algorithm))

        results = yield self.store.claim_e2e_one_time_keys(query)

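        # Reshape the store's result into the nested
        # {user_id: {device_id: {key_id: key}}} structure the response needs.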
        json_result = {}
        for user_id, device_keys in results.items():
            for device_id, keys in device_keys.items():
                for key_id, json_bytes in keys.items():
                    json_result.setdefault(user_id, {})[device_id] = {
                        key_id: json.loads(json_bytes)
                    }

        logger.info(
            "Claimed one-time-keys: %s",
            ",".join(("%s for %s:%s" % (key_id, user_id, device_id)
                      for user_id, user_keys in json_result.iteritems()
                      for device_id, device_keys in user_keys.iteritems()
                      for key_id, _ in device_keys.iteritems())),
        )

        defer.returnValue({"one_time_keys": json_result})

    @defer.inlineCallbacks
    @log_function
    def on_get_missing_events(self, origin, room_id, earliest_events,
                              latest_events, limit, min_depth):
        with (yield self._server_linearizer.queue((origin, room_id))):
            logger.info(
                "on_get_missing_events: earliest_events: %r, latest_events: %r,"
                " limit: %d, min_depth: %d", earliest_events, latest_events,
                limit, min_depth)

            missing_events = yield self.handler.on_get_missing_events(
                origin, room_id, earliest_events, latest_events, limit,
                min_depth)

            if len(missing_events) < 5:
                logger.info("Returning %d events: %r", len(missing_events),
                            missing_events)
            else:
                logger.info("Returning %d events", len(missing_events))

            time_now = self._clock.time_msec()

        defer.returnValue({
            "events": [ev.get_pdu_json(time_now) for ev in missing_events],
        })

    @log_function
    def on_openid_userinfo(self, token):
        ts_now_ms = self._clock.time_msec()
        return self.store.get_user_id_for_open_id_token(token, ts_now_ms)

    @log_function
    def _get_persisted_pdu(self, origin, event_id, do_auth=True):
        """ Get a PDU from the database with given origin and id.

        Returns:
            Deferred: Results in a `Pdu`.
        """
        return self.handler.get_persisted_pdu(origin,
                                              event_id,
                                              do_auth=do_auth)

    def _transaction_from_pdus(self, pdu_list):
        """Returns a new Transaction containing the given PDUs suitable for
        transmission.
        """
        time_now = self._clock.time_msec()
        pdus = [p.get_pdu_json(time_now) for p in pdu_list]
        return Transaction(
            origin=self.server_name,
            pdus=pdus,
            origin_server_ts=int(time_now),
            destination=None,
        )

    @defer.inlineCallbacks
    def _handle_received_pdu(self, origin, pdu):
        """ Process a PDU received in a federation /send/ transaction.

        Args:
            origin (str): server which sent the pdu
            pdu (FrozenEvent): received pdu

        Returns (Deferred): completes with None
        Raises: FederationError if the signatures / hash do not match
    """
        # check that it's actually being sent from a valid destination to
        # workaround bug #1753 in 0.18.5 and 0.18.6
        if origin != get_domain_from_id(pdu.event_id):
            # We continue to accept join events from any server; this is
            # necessary for the federation join dance to work correctly.
            # (When we join over federation, the "helper" server is
            # responsible for sending out the join event, rather than the
            # origin. See bug #1893).
            if not (pdu.type == 'm.room.member' and pdu.content
                    and pdu.content.get("membership", None) == 'join'):
                logger.info("Discarding PDU %s from invalid origin %s",
                            pdu.event_id, origin)
                return
            else:
                logger.info("Accepting join PDU %s from %s", pdu.event_id,
                            origin)

        # Check signature.
        try:
            pdu = yield self._check_sigs_and_hash(pdu)
        except SynapseError as e:
            raise FederationError(
                "ERROR",
                e.code,
                e.msg,
                affected=pdu.event_id,
            )

        yield self.handler.on_receive_pdu(origin, pdu, get_missing=True)

    def __str__(self):
        return "<ReplicationLayer(%s)>" % self.server_name

    @defer.inlineCallbacks
    def exchange_third_party_invite(
        self,
        sender_user_id,
        target_user_id,
        room_id,
        signed,
    ):
        ret = yield self.handler.exchange_third_party_invite(
            sender_user_id,
            target_user_id,
            room_id,
            signed,
        )
        defer.returnValue(ret)

    @defer.inlineCallbacks
    def on_exchange_third_party_invite_request(self, origin, room_id,
                                               event_dict):
        ret = yield self.handler.on_exchange_third_party_invite_request(
            origin, room_id, event_dict)
        defer.returnValue(ret)
Example #5
class ApplicationServiceApi(SimpleHttpClient):
    """This class manages HS -> AS communications, including querying and
    pushing.
    """

    def __init__(self, hs):
        super(ApplicationServiceApi, self).__init__(hs)
        self.clock = hs.get_clock()

        self.protocol_meta_cache = ResponseCache(hs, timeout_ms=HOUR_IN_MS)

    @defer.inlineCallbacks
    def query_user(self, service, user_id):
        if service.url is None:
            defer.returnValue(False)
        uri = service.url + ("/users/%s" % urllib.quote(user_id))
        response = None
        try:
            response = yield self.get_json(uri, {
                "access_token": service.hs_token
            })
            if response is not None:  # just an empty json object
                defer.returnValue(True)
        except CodeMessageException as e:
            if e.code == 404:
                defer.returnValue(False)
                return
            logger.warning("query_user to %s received %s", uri, e.code)
        except Exception as ex:
            logger.warning("query_user to %s threw exception %s", uri, ex)
        defer.returnValue(False)

    @defer.inlineCallbacks
    def query_alias(self, service, alias):
        if service.url is None:
            defer.returnValue(False)
        uri = service.url + ("/rooms/%s" % urllib.quote(alias))
        response = None
        try:
            response = yield self.get_json(uri, {
                "access_token": service.hs_token
            })
            if response is not None:  # just an empty json object
                defer.returnValue(True)
        except CodeMessageException as e:
            logger.warning("query_alias to %s received %s", uri, e.code)
            if e.code == 404:
                defer.returnValue(False)
                return
        except Exception as ex:
            logger.warning("query_alias to %s threw exception %s", uri, ex)
        defer.returnValue(False)

    @defer.inlineCallbacks
    def query_3pe(self, service, kind, protocol, fields):
        if kind == ThirdPartyEntityKind.USER:
            required_field = "userid"
        elif kind == ThirdPartyEntityKind.LOCATION:
            required_field = "alias"
        else:
            raise ValueError(
                "Unrecognised 'kind' argument %r to query_3pe()" % (kind,)
            )
        if service.url is None:
            defer.returnValue([])

        uri = "%s%s/thirdparty/%s/%s" % (
            service.url,
            APP_SERVICE_PREFIX,
            kind,
            urllib.quote(protocol)
        )
        try:
            response = yield self.get_json(uri, fields)
            if not isinstance(response, list):
                logger.warning(
                    "query_3pe to %s returned an invalid response %r",
                    uri, response
                )
                defer.returnValue([])

            ret = []
            for r in response:
                if _is_valid_3pe_result(r, field=required_field):
                    ret.append(r)
                else:
                    logger.warning(
                        "query_3pe to %s returned an invalid result %r",
                        uri, r
                    )

            defer.returnValue(ret)
        except Exception as ex:
            logger.warning("query_3pe to %s threw exception %s", uri, ex)
            defer.returnValue([])

    def get_3pe_protocol(self, service, protocol):
        if service.url is None:
            # This is not an @inlineCallbacks function, so hand back a
            # Deferred directly rather than raising via defer.returnValue.
            return defer.succeed({})

        @defer.inlineCallbacks
        def _get():
            uri = "%s%s/thirdparty/protocol/%s" % (
                service.url,
                APP_SERVICE_PREFIX,
                urllib.quote(protocol)
            )
            try:
                info = yield self.get_json(uri, {})

                if not _is_valid_3pe_metadata(info):
                    logger.warning("query_3pe_protocol to %s did not return a"
                                   " valid result", uri)
                    defer.returnValue(None)

                defer.returnValue(info)
            except Exception as ex:
                logger.warning("query_3pe_protocol to %s threw exception %s",
                               uri, ex)
                defer.returnValue(None)

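        # Protocol metadata changes rarely, so serve it from the hour-long
        # cache where possible; concurrent callers share the in-flight
        # Deferred.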
        key = (service.id, protocol)
        return self.protocol_meta_cache.get(key) or (
            self.protocol_meta_cache.set(key, _get())
        )

    @defer.inlineCallbacks
    def push_bulk(self, service, events, txn_id=None):
        if service.url is None:
            defer.returnValue(True)

        events = self._serialize(events)

        if txn_id is None:
            logger.warning("push_bulk: Missing txn ID sending events to %s",
                           service.url)
            txn_id = str(0)
        txn_id = str(txn_id)

        uri = service.url + ("/transactions/%s" %
                             urllib.quote(txn_id))
        try:
            yield self.put_json(
                uri=uri,
                json_body={
                    "events": events
                },
                args={
                    "access_token": service.hs_token
                })
            defer.returnValue(True)
            return
        except CodeMessageException as e:
            logger.warning("push_bulk to %s received %s", uri, e.code)
        except Exception as ex:
            logger.warning("push_bulk to %s threw exception %s", uri, ex)
        defer.returnValue(False)

    def _serialize(self, events):
        time_now = self.clock.time_msec()
        return [
            serialize_event(e, time_now, as_client_event=True) for e in events
        ]
Example #6
class RoomListHandler(BaseHandler):
    def __init__(self, hs):
        super(RoomListHandler, self).__init__(hs)
        self.response_cache = ResponseCache()
        self.remote_list_request_cache = ResponseCache()
        self.remote_list_cache = {}
        self.fetch_looping_call = hs.get_clock().looping_call(
            self.fetch_all_remote_lists, REMOTE_ROOM_LIST_POLL_INTERVAL)
        self.fetch_all_remote_lists()

    def get_local_public_room_list(self):
        result = self.response_cache.get(())
        if not result:
            result = self.response_cache.set((), self._get_public_room_list())
        return result

    @defer.inlineCallbacks
    def _get_public_room_list(self):
        room_ids = yield self.store.get_public_room_ids()

        results = []

        @defer.inlineCallbacks
        def handle_room(room_id):
            # We pull each bit of state out individually to avoid pulling the
            # full state into memory. Due to how the caching works this should
            # be fairly quick, even if not originally in the cache.
            def get_state(etype, state_key):
                return self.state_handler.get_current_state(
                    room_id, etype, state_key)

            # Double check that this is actually a public room.
            join_rules_event = yield get_state(EventTypes.JoinRules, "")
            if join_rules_event:
                join_rule = join_rules_event.content.get("join_rule", None)
                if join_rule and join_rule != JoinRules.PUBLIC:
                    defer.returnValue(None)

            result = {"room_id": room_id}

            joined_users = yield self.store.get_users_in_room(room_id)
            if len(joined_users) == 0:
                return

            result["num_joined_members"] = len(joined_users)

            aliases = yield self.store.get_aliases_for_room(room_id)
            if aliases:
                result["aliases"] = aliases

            name_event = yield get_state(EventTypes.Name, "")
            if name_event:
                name = name_event.content.get("name", None)
                if name:
                    result["name"] = name

            topic_event = yield get_state(EventTypes.Topic, "")
            if topic_event:
                topic = topic_event.content.get("topic", None)
                if topic:
                    result["topic"] = topic

            canonical_event = yield get_state(EventTypes.CanonicalAlias, "")
            if canonical_event:
                canonical_alias = canonical_event.content.get("alias", None)
                if canonical_alias:
                    result["canonical_alias"] = canonical_alias

            visibility_event = yield get_state(
                EventTypes.RoomHistoryVisibility, "")
            visibility = None
            if visibility_event:
                visibility = visibility_event.content.get(
                    "history_visibility", None)
            result["world_readable"] = visibility == "world_readable"

            guest_event = yield get_state(EventTypes.GuestAccess, "")
            guest = None
            if guest_event:
                guest = guest_event.content.get("guest_access", None)
            result["guest_can_join"] = guest == "can_join"

            avatar_event = yield get_state("m.room.avatar", "")
            if avatar_event:
                avatar_url = avatar_event.content.get("url", None)
                if avatar_url:
                    result["avatar_url"] = avatar_url

            results.append(result)

        yield concurrently_execute(handle_room, room_ids, 10)

        # FIXME (erikj): START is no longer a valid value
        defer.returnValue({"start": "START", "end": "END", "chunk": results})

    @defer.inlineCallbacks
    def fetch_all_remote_lists(self):
        deferred = self.hs.get_replication_layer().get_public_rooms(
            self.hs.config.secondary_directory_servers)
        self.remote_list_request_cache.set((), deferred)
        self.remote_list_cache = yield deferred

    @defer.inlineCallbacks
    def get_aggregated_public_room_list(self):
        """
        Get the public room list from this server and the servers
        specified in the secondary_directory_servers config option.
        XXX: Pagination...
        """
        # We return the results from our cache, which is updated by a looping call,
        # unless we're missing a cache entry, in which case wait for the result
        # of the fetch if there's one in progress. If not, omit that server.
        wait = False
        for s in self.hs.config.secondary_directory_servers:
            if s not in self.remote_list_cache:
                logger.warn("No cached room list from %s: waiting for fetch",
                            s)
                wait = True
                break

        if wait and self.remote_list_request_cache.get(()):
            yield self.remote_list_request_cache.get(())

        public_rooms = yield self.get_local_public_room_list()

        # keep track of which room IDs we've seen so we can de-dup
        room_ids = set()

        # Tag all the rooms in our list with our server name.
        # Also add them to the de-duping set.
        for room in public_rooms['chunk']:
            room["server_name"] = self.hs.hostname
            room_ids.add(room["room_id"])

        # Now add the results from federation
        for server_name, server_result in self.remote_list_cache.items():
            for room in server_result["chunk"]:
                if room["room_id"] not in room_ids:
                    room["server_name"] = server_name
                    public_rooms["chunk"].append(room)
                    room_ids.add(room["room_id"])

        defer.returnValue(public_rooms)
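
The aggregation above boils down to a union keyed on room_id, with the local list winning on conflict. A standalone sketch of that merge using plain dicts and lists (no homeserver machinery; the function name is illustrative):

def merge_room_lists(local_chunk, remote_chunks, local_server):
    # Tag local rooms first; remote entries only fill in room IDs we
    # have not seen yet, so the local view takes precedence.
    seen = set()
    merged = []
    for room in local_chunk:
        room["server_name"] = local_server
        seen.add(room["room_id"])
        merged.append(room)
    for server_name, chunk in remote_chunks.items():
        for room in chunk:
            if room["room_id"] not in seen:
                room["server_name"] = server_name
                seen.add(room["room_id"])
                merged.append(room)
    return merged
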
Example #8
class FederationServer(FederationBase):
    def __init__(self, hs):
        super(FederationServer, self).__init__(hs)

        self.auth = hs.get_auth()

        self._server_linearizer = Linearizer("fed_server")

        # We cache responses to state queries, as they take a while and often
        # come in waves.
        self._state_resp_cache = ResponseCache(hs, timeout_ms=30000)

    def set_handler(self, handler):
        """Sets the handler that the replication layer will use to communicate
        receipt of new PDUs from other home servers. The required methods are
        documented on :py:class:`.ReplicationHandler`.
        """
        self.handler = handler

    def register_edu_handler(self, edu_type, handler):
        if edu_type in self.edu_handlers:
            raise KeyError("Already have an EDU handler for %s" % (edu_type, ))

        self.edu_handlers[edu_type] = handler

    def register_query_handler(self, query_type, handler):
        """Sets the handler callable that will be used to handle an incoming
        federation Query of the given type.

        Args:
            query_type (str): Category name of the query, which should match
                the string used by make_query.
            handler (callable): Invoked to handle incoming queries of this type

        handler is invoked as:
            result = handler(args)

        where 'args' is a dict mapping strings to strings of the query
          arguments. It should return a Deferred that will eventually yield an
          object to encode as JSON.
        """
        if query_type in self.query_handlers:
            raise KeyError("Already have a Query handler for %s" %
                           (query_type, ))

        self.query_handlers[query_type] = handler
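
As the docstring spells out, a query handler receives the string-to-string args dict and must return a Deferred yielding a JSON-encodable object. A hypothetical registration (the handler body and the federation_server instance are illustrative, not from the code above):

from twisted.internet import defer

def on_profile_query(args):
    # args maps strings to strings, e.g.
    # {"user_id": "@alice:example.com", "field": "displayname"}
    return defer.succeed({"displayname": "Alice"})

federation_server.register_query_handler("profile", on_profile_query)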

    @defer.inlineCallbacks
    @log_function
    def on_backfill_request(self, origin, room_id, versions, limit):
        with (yield self._server_linearizer.queue((origin, room_id))):
            pdus = yield self.handler.on_backfill_request(
                origin, room_id, versions, limit)

            res = self._transaction_from_pdus(pdus).get_dict()

        defer.returnValue((200, res))

    @defer.inlineCallbacks
    @log_function
    def on_incoming_transaction(self, transaction_data):
        transaction = Transaction(**transaction_data)

        received_pdus_counter.inc_by(len(transaction.pdus))

        for p in transaction.pdus:
            if "unsigned" in p:
                unsigned = p["unsigned"]
                if "age" in unsigned:
                    p["age"] = unsigned["age"]
            if "age" in p:
                p["age_ts"] = int(self._clock.time_msec()) - int(p["age"])
                del p["age"]

        pdu_list = [self.event_from_pdu_json(p) for p in transaction.pdus]

        logger.debug("[%s] Got transaction", transaction.transaction_id)

        response = yield self.transaction_actions.have_responded(transaction)

        if response:
            logger.debug("[%s] We've already responded to this request",
                         transaction.transaction_id)
            defer.returnValue(response)
            return

        logger.debug("[%s] Transaction is new", transaction.transaction_id)

        results = []

        for pdu in pdu_list:
            # check that it's actually being sent from a valid destination to
            # workaround bug #1753 in 0.18.5 and 0.18.6
            if transaction.origin != get_domain_from_id(pdu.event_id):
                # We continue to accept join events from any server; this is
                # necessary for the federation join dance to work correctly.
                # (When we join over federation, the "helper" server is
                # responsible for sending out the join event, rather than the
                # origin. See bug #1893).
                if not (pdu.type == 'm.room.member' and pdu.content
                        and pdu.content.get("membership", None) == 'join'):
                    logger.info("Discarding PDU %s from invalid origin %s",
                                pdu.event_id, transaction.origin)
                    continue
                else:
                    logger.info("Accepting join PDU %s from %s", pdu.event_id,
                                transaction.origin)

            try:
                yield self._handle_received_pdu(transaction.origin, pdu)
                results.append({})
            except FederationError as e:
                self.send_failure(e, transaction.origin)
                results.append({"error": str(e)})
            except Exception as e:
                results.append({"error": str(e)})
                logger.exception("Failed to handle PDU")

        if hasattr(transaction, "edus"):
            for edu in (Edu(**x) for x in transaction.edus):
                yield self.received_edu(transaction.origin, edu.edu_type,
                                        edu.content)

            for failure in getattr(transaction, "pdu_failures", []):
                logger.info("Got failure %r", failure)

        logger.debug("Returning: %s", str(results))

        response = {
            "pdus": dict(zip((p.event_id for p in pdu_list), results)),
        }

        yield self.transaction_actions.set_response(transaction, 200, response)
        defer.returnValue((200, response))
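
The response body zips the per-PDU results back onto the event IDs, in order. A worked example of that mapping (the event IDs are made up):

pdu_ids = ["$ev1", "$ev2", "$ev3"]
results = [{}, {"error": "SomeError"}, {}]

response = {"pdus": dict(zip(pdu_ids, results))}
# {"$ev1": {}, "$ev2": {"error": "SomeError"}, "$ev3": {}} -- an empty
# dict means the PDU was accepted, an "error" entry that it failed.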

    @defer.inlineCallbacks
    def received_edu(self, origin, edu_type, content):
        received_edus_counter.inc()

        if edu_type in self.edu_handlers:
            try:
                yield self.edu_handlers[edu_type](origin, content)
            except SynapseError as e:
                logger.info("Failed to handle edu %r: %r", edu_type, e)
            except Exception as e:
                logger.exception("Failed to handle edu %r", edu_type)
        else:
            logger.warn("Received EDU of type %s with no handler", edu_type)

    @defer.inlineCallbacks
    @log_function
    def on_context_state_request(self, origin, room_id, event_id):
        if not event_id:
            raise NotImplementedError("Specify an event")

        in_room = yield self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")

        result = self._state_resp_cache.get((room_id, event_id))
        if not result:
            with (yield self._server_linearizer.queue((origin, room_id))):
                resp = yield self._state_resp_cache.set(
                    (room_id, event_id),
                    self._on_context_state_request_compute(room_id, event_id))
        else:
            resp = yield result

        defer.returnValue((200, resp))
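
The get/set dance on _state_resp_cache is the generic "attach to an in-flight computation" pattern. A cut-down, illustrative stand-in for ResponseCache showing only the keying and lifetime logic (the real class additionally wraps the deferred so several callers can observe it safely, and keeps results around for timeout_ms):

class MiniResponseCache(object):
    def __init__(self):
        self.pending = {}

    def get(self, key):
        # Returns the in-flight deferred for this key, if any.
        return self.pending.get(key)

    def set(self, key, deferred):
        # Later callers for the same key pick this deferred up via
        # get() instead of starting a second identical computation.
        self.pending[key] = deferred

        def remove(result):
            # Drop the entry once the computation finishes.
            self.pending.pop(key, None)
            return result

        deferred.addBoth(remove)
        return deferred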

    @defer.inlineCallbacks
    def on_state_ids_request(self, origin, room_id, event_id):
        if not event_id:
            raise NotImplementedError("Specify an event")

        in_room = yield self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")

        state_ids = yield self.handler.get_state_ids_for_pdu(
            room_id,
            event_id,
        )
        auth_chain_ids = yield self.store.get_auth_chain_ids(state_ids)

        defer.returnValue((200, {
            "pdu_ids": state_ids,
            "auth_chain_ids": auth_chain_ids,
        }))

    @defer.inlineCallbacks
    def _on_context_state_request_compute(self, room_id, event_id):
        pdus = yield self.handler.get_state_for_pdu(
            room_id,
            event_id,
        )
        auth_chain = yield self.store.get_auth_chain(
            [pdu.event_id for pdu in pdus])

        for event in auth_chain:
            # We sign these again because there was a bug where we
            # incorrectly signed things the first time round
            if self.hs.is_mine_id(event.event_id):
                event.signatures.update(
                    compute_event_signature(event, self.hs.hostname,
                                            self.hs.config.signing_key[0]))

        defer.returnValue({
            "pdus": [pdu.get_pdu_json() for pdu in pdus],
            "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
        })

    @defer.inlineCallbacks
    @log_function
    def on_pdu_request(self, origin, event_id):
        pdu = yield self._get_persisted_pdu(origin, event_id)

        if pdu:
            defer.returnValue(
                (200, self._transaction_from_pdus([pdu]).get_dict()))
        else:
            defer.returnValue((404, ""))

    @defer.inlineCallbacks
    @log_function
    def on_pull_request(self, origin, versions):
        raise NotImplementedError("Pull transactions not implemented")

    @defer.inlineCallbacks
    def on_query_request(self, query_type, args):
        received_queries_counter.inc(query_type)

        if query_type in self.query_handlers:
            response = yield self.query_handlers[query_type](args)
            defer.returnValue((200, response))
        else:
            defer.returnValue(
                (404, "No handler for Query type '%s'" % (query_type, )))

    @defer.inlineCallbacks
    def on_make_join_request(self, room_id, user_id):
        pdu = yield self.handler.on_make_join_request(room_id, user_id)
        time_now = self._clock.time_msec()
        defer.returnValue({"event": pdu.get_pdu_json(time_now)})

    @defer.inlineCallbacks
    def on_invite_request(self, origin, content):
        pdu = self.event_from_pdu_json(content)
        ret_pdu = yield self.handler.on_invite_request(origin, pdu)
        time_now = self._clock.time_msec()
        defer.returnValue((200, {"event": ret_pdu.get_pdu_json(time_now)}))

    @defer.inlineCallbacks
    def on_send_join_request(self, origin, content):
        logger.debug("on_send_join_request: content: %s", content)
        pdu = self.event_from_pdu_json(content)
        logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)
        res_pdus = yield self.handler.on_send_join_request(origin, pdu)
        time_now = self._clock.time_msec()
        defer.returnValue((200, {
            "state": [p.get_pdu_json(time_now) for p in res_pdus["state"]],
            "auth_chain":
            [p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]],
        }))

    @defer.inlineCallbacks
    def on_make_leave_request(self, room_id, user_id):
        pdu = yield self.handler.on_make_leave_request(room_id, user_id)
        time_now = self._clock.time_msec()
        defer.returnValue({"event": pdu.get_pdu_json(time_now)})

    @defer.inlineCallbacks
    def on_send_leave_request(self, origin, content):
        logger.debug("on_send_leave_request: content: %s", content)
        pdu = self.event_from_pdu_json(content)
        logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)
        yield self.handler.on_send_leave_request(origin, pdu)
        defer.returnValue((200, {}))

    @defer.inlineCallbacks
    def on_event_auth(self, origin, room_id, event_id):
        with (yield self._server_linearizer.queue((origin, room_id))):
            time_now = self._clock.time_msec()
            auth_pdus = yield self.handler.on_event_auth(event_id)
            res = {
                "auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus],
            }
        defer.returnValue((200, res))

    @defer.inlineCallbacks
    def on_query_auth_request(self, origin, content, room_id, event_id):
        """
        Content is a dict with keys::
            auth_chain (list): A list of events that give the auth chain.
            missing (list): A list of event_ids indicating what the other
              side (`origin`) thinks we're missing.
            rejects (dict): A mapping from event_id to a 2-tuple of reason
              string and a proof (or None) of why the event was rejected.
              The keys of this dict give the list of events the `origin` has
              rejected.

        Args:
            origin (str)
            content (dict)
            event_id (str)

        Returns:
            Deferred: Results in `dict` with the same format as `content`
        """
        with (yield self._server_linearizer.queue((origin, room_id))):
            auth_chain = [
                self.event_from_pdu_json(e) for e in content["auth_chain"]
            ]

            signed_auth = yield self._check_sigs_and_hash_and_fetch(
                origin, auth_chain, outlier=True)

            ret = yield self.handler.on_query_auth(
                origin,
                event_id,
                signed_auth,
                content.get("rejects", []),
                content.get("missing", []),
            )

            time_now = self._clock.time_msec()
            send_content = {
                "auth_chain":
                [e.get_pdu_json(time_now) for e in ret["auth_chain"]],
                "rejects": ret.get("rejects", []),
                "missing": ret.get("missing", []),
            }

        defer.returnValue((200, send_content))

    @log_function
    def on_query_client_keys(self, origin, content):
        return self.on_query_request("client_keys", content)

    def on_query_user_devices(self, origin, user_id):
        return self.on_query_request("user_devices", user_id)

    @defer.inlineCallbacks
    @log_function
    def on_claim_client_keys(self, origin, content):
        query = []
        for user_id, device_keys in content.get("one_time_keys", {}).items():
            for device_id, algorithm in device_keys.items():
                query.append((user_id, device_id, algorithm))

        results = yield self.store.claim_e2e_one_time_keys(query)

        json_result = {}
        for user_id, device_keys in results.items():
            for device_id, keys in device_keys.items():
                for key_id, json_bytes in keys.items():
                    json_result.setdefault(user_id, {})[device_id] = {
                        key_id: json.loads(json_bytes)
                    }

        logger.info(
            "Claimed one-time-keys: %s",
            ",".join(("%s for %s:%s" % (key_id, user_id, device_id)
                      for user_id, user_keys in json_result.iteritems()
                      for device_id, device_keys in user_keys.iteritems()
                      for key_id, _ in device_keys.iteritems())),
        )

        defer.returnValue({"one_time_keys": json_result})

    @defer.inlineCallbacks
    @log_function
    def on_get_missing_events(self, origin, room_id, earliest_events,
                              latest_events, limit, min_depth):
        with (yield self._server_linearizer.queue((origin, room_id))):
            logger.info(
                "on_get_missing_events: earliest_events: %r, latest_events: %r,"
                " limit: %d, min_depth: %d", earliest_events, latest_events,
                limit, min_depth)

            missing_events = yield self.handler.on_get_missing_events(
                origin, room_id, earliest_events, latest_events, limit,
                min_depth)

            if len(missing_events) < 5:
                logger.info("Returning %d events: %r", len(missing_events),
                            missing_events)
            else:
                logger.info("Returning %d events", len(missing_events))

            time_now = self._clock.time_msec()

        defer.returnValue({
            "events": [ev.get_pdu_json(time_now) for ev in missing_events],
        })

    @log_function
    def on_openid_userinfo(self, token):
        ts_now_ms = self._clock.time_msec()
        return self.store.get_user_id_for_open_id_token(token, ts_now_ms)

    @log_function
    def _get_persisted_pdu(self, origin, event_id, do_auth=True):
        """ Get a PDU from the database with given origin and id.

        Returns:
            Deferred: Results in a `Pdu`.
        """
        return self.handler.get_persisted_pdu(origin,
                                              event_id,
                                              do_auth=do_auth)

    def _transaction_from_pdus(self, pdu_list):
        """Returns a new Transaction containing the given PDUs suitable for
        transmission.
        """
        time_now = self._clock.time_msec()
        pdus = [p.get_pdu_json(time_now) for p in pdu_list]
        return Transaction(
            origin=self.server_name,
            pdus=pdus,
            origin_server_ts=int(time_now),
            destination=None,
        )

    @defer.inlineCallbacks
    def _handle_received_pdu(self, origin, pdu):
        """ Process a PDU received in a federation /send/ transaction.

        Args:
            origin (str): server which sent the pdu
            pdu (FrozenEvent): received pdu

        Returns (Deferred): completes with None
        Raises: FederationError if the signatures / hash do not match
    """
        # Check signature.
        try:
            pdu = yield self._check_sigs_and_hash(pdu)
        except SynapseError as e:
            raise FederationError(
                "ERROR",
                e.code,
                e.msg,
                affected=pdu.event_id,
            )

        yield self.handler.on_receive_pdu(origin, pdu, get_missing=True)

    def __str__(self):
        return "<ReplicationLayer(%s)>" % self.server_name

    def event_from_pdu_json(self, pdu_json, outlier=False):
        event = FrozenEvent(pdu_json)

        event.internal_metadata.outlier = outlier

        return event

    @defer.inlineCallbacks
    def exchange_third_party_invite(
        self,
        sender_user_id,
        target_user_id,
        room_id,
        signed,
    ):
        ret = yield self.handler.exchange_third_party_invite(
            sender_user_id,
            target_user_id,
            room_id,
            signed,
        )
        defer.returnValue(ret)

    @defer.inlineCallbacks
    def on_exchange_third_party_invite_request(self, origin, room_id,
                                               event_dict):
        ret = yield self.handler.on_exchange_third_party_invite_request(
            origin, room_id, event_dict)
        defer.returnValue(ret)
Example #9
class FederationServer(FederationBase):
    def __init__(self, hs):
        super(FederationServer, self).__init__(hs)

        self.auth = hs.get_auth()

        self._room_pdu_linearizer = Linearizer()
        self._server_linearizer = Linearizer()

        # We cache responses to state queries, as they take a while and often
        # come in waves.
        self._state_resp_cache = ResponseCache(hs, timeout_ms=30000)

    def set_handler(self, handler):
        """Sets the handler that the replication layer will use to communicate
        receipt of new PDUs from other home servers. The required methods are
        documented on :py:class:`.ReplicationHandler`.
        """
        self.handler = handler

    def register_edu_handler(self, edu_type, handler):
        if edu_type in self.edu_handlers:
            raise KeyError("Already have an EDU handler for %s" % (edu_type, ))

        self.edu_handlers[edu_type] = handler

    def register_query_handler(self, query_type, handler):
        """Sets the handler callable that will be used to handle an incoming
        federation Query of the given type.

        Args:
            query_type (str): Category name of the query, which should match
                the string used by make_query.
            handler (callable): Invoked to handle incoming queries of this type

        handler is invoked as:
            result = handler(args)

        where 'args' is a dict mapping strings to strings of the query
          arguments. It should return a Deferred that will eventually yield an
          object to encode as JSON.
        """
        if query_type in self.query_handlers:
            raise KeyError("Already have a Query handler for %s" %
                           (query_type, ))

        self.query_handlers[query_type] = handler

    @defer.inlineCallbacks
    @log_function
    def on_backfill_request(self, origin, room_id, versions, limit):
        with (yield self._server_linearizer.queue((origin, room_id))):
            pdus = yield self.handler.on_backfill_request(
                origin, room_id, versions, limit)

            res = self._transaction_from_pdus(pdus).get_dict()

        defer.returnValue((200, res))

    @defer.inlineCallbacks
    @log_function
    def on_incoming_transaction(self, transaction_data):
        transaction = Transaction(**transaction_data)

        received_pdus_counter.inc_by(len(transaction.pdus))

        for p in transaction.pdus:
            if "unsigned" in p:
                unsigned = p["unsigned"]
                if "age" in unsigned:
                    p["age"] = unsigned["age"]
            if "age" in p:
                p["age_ts"] = int(self._clock.time_msec()) - int(p["age"])
                del p["age"]

        pdu_list = [self.event_from_pdu_json(p) for p in transaction.pdus]

        logger.debug("[%s] Got transaction", transaction.transaction_id)

        response = yield self.transaction_actions.have_responded(transaction)

        if response:
            logger.debug("[%s] We've already responded to this request",
                         transaction.transaction_id)
            defer.returnValue(response)
            return

        logger.debug("[%s] Transaction is new", transaction.transaction_id)

        results = []

        for pdu in pdu_list:
            # check that it's actually being sent from a valid destination to
            # workaround bug #1753 in 0.18.5 and 0.18.6
            if transaction.origin != get_domain_from_id(pdu.event_id):
                if not (pdu.type == 'm.room.member' and pdu.content
                        and pdu.content.get("membership", None) == 'join'
                        and self.hs.is_mine_id(pdu.state_key)):
                    logger.info("Discarding PDU %s from invalid origin %s",
                                pdu.event_id, transaction.origin)
                    continue
                else:
                    logger.info("Accepting join PDU %s from %s", pdu.event_id,
                                transaction.origin)

            try:
                yield self._handle_new_pdu(transaction.origin, pdu)
                results.append({})
            except FederationError as e:
                self.send_failure(e, transaction.origin)
                results.append({"error": str(e)})
            except Exception as e:
                results.append({"error": str(e)})
                logger.exception("Failed to handle PDU")

        if hasattr(transaction, "edus"):
            for edu in (Edu(**x) for x in transaction.edus):
                yield self.received_edu(transaction.origin, edu.edu_type,
                                        edu.content)

            for failure in getattr(transaction, "pdu_failures", []):
                logger.info("Got failure %r", failure)

        logger.debug("Returning: %s", str(results))

        response = {
            "pdus": dict(zip((p.event_id for p in pdu_list), results)),
        }

        yield self.transaction_actions.set_response(transaction, 200, response)
        defer.returnValue((200, response))

    @defer.inlineCallbacks
    def received_edu(self, origin, edu_type, content):
        received_edus_counter.inc()

        if edu_type in self.edu_handlers:
            try:
                yield self.edu_handlers[edu_type](origin, content)
            except SynapseError as e:
                logger.info("Failed to handle edu %r: %r", edu_type, e)
            except Exception as e:
                logger.exception("Failed to handle edu %r", edu_type)
        else:
            logger.warn("Received EDU of type %s with no handler", edu_type)

    @defer.inlineCallbacks
    @log_function
    def on_context_state_request(self, origin, room_id, event_id):
        if not event_id:
            raise NotImplementedError("Specify an event")

        in_room = yield self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")

        result = self._state_resp_cache.get((room_id, event_id))
        if not result:
            with (yield self._server_linearizer.queue((origin, room_id))):
                resp = yield self._state_resp_cache.set(
                    (room_id, event_id),
                    self._on_context_state_request_compute(room_id, event_id))
        else:
            resp = yield result

        defer.returnValue((200, resp))

    @defer.inlineCallbacks
    def on_state_ids_request(self, origin, room_id, event_id):
        if not event_id:
            raise NotImplementedError("Specify an event")

        in_room = yield self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")

        state_ids = yield self.handler.get_state_ids_for_pdu(
            room_id,
            event_id,
        )
        auth_chain_ids = yield self.store.get_auth_chain_ids(state_ids)

        defer.returnValue((200, {
            "pdu_ids": state_ids,
            "auth_chain_ids": auth_chain_ids,
        }))

    @defer.inlineCallbacks
    def _on_context_state_request_compute(self, room_id, event_id):
        pdus = yield self.handler.get_state_for_pdu(
            room_id,
            event_id,
        )
        auth_chain = yield self.store.get_auth_chain(
            [pdu.event_id for pdu in pdus])

        for event in auth_chain:
            # We sign these again because there was a bug where we
            # incorrectly signed things the first time round
            if self.hs.is_mine_id(event.event_id):
                event.signatures.update(
                    compute_event_signature(event, self.hs.hostname,
                                            self.hs.config.signing_key[0]))

        defer.returnValue({
            "pdus": [pdu.get_pdu_json() for pdu in pdus],
            "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
        })

    @defer.inlineCallbacks
    @log_function
    def on_pdu_request(self, origin, event_id):
        pdu = yield self._get_persisted_pdu(origin, event_id)

        if pdu:
            defer.returnValue(
                (200, self._transaction_from_pdus([pdu]).get_dict()))
        else:
            defer.returnValue((404, ""))

    @defer.inlineCallbacks
    @log_function
    def on_pull_request(self, origin, versions):
        raise NotImplementedError("Pull transactions not implemented")

    @defer.inlineCallbacks
    def on_query_request(self, query_type, args):
        received_queries_counter.inc(query_type)

        if query_type in self.query_handlers:
            response = yield self.query_handlers[query_type](args)
            defer.returnValue((200, response))
        else:
            defer.returnValue(
                (404, "No handler for Query type '%s'" % (query_type, )))

    @defer.inlineCallbacks
    def on_make_join_request(self, room_id, user_id):
        pdu = yield self.handler.on_make_join_request(room_id, user_id)
        time_now = self._clock.time_msec()
        defer.returnValue({"event": pdu.get_pdu_json(time_now)})

    @defer.inlineCallbacks
    def on_invite_request(self, origin, content):
        pdu = self.event_from_pdu_json(content)
        ret_pdu = yield self.handler.on_invite_request(origin, pdu)
        time_now = self._clock.time_msec()
        defer.returnValue((200, {"event": ret_pdu.get_pdu_json(time_now)}))

    @defer.inlineCallbacks
    def on_send_join_request(self, origin, content):
        logger.debug("on_send_join_request: content: %s", content)
        pdu = self.event_from_pdu_json(content)
        logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)
        res_pdus = yield self.handler.on_send_join_request(origin, pdu)
        time_now = self._clock.time_msec()
        defer.returnValue((200, {
            "state": [p.get_pdu_json(time_now) for p in res_pdus["state"]],
            "auth_chain":
            [p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]],
        }))

    @defer.inlineCallbacks
    def on_make_leave_request(self, room_id, user_id):
        pdu = yield self.handler.on_make_leave_request(room_id, user_id)
        time_now = self._clock.time_msec()
        defer.returnValue({"event": pdu.get_pdu_json(time_now)})

    @defer.inlineCallbacks
    def on_send_leave_request(self, origin, content):
        logger.debug("on_send_leave_request: content: %s", content)
        pdu = self.event_from_pdu_json(content)
        logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)
        yield self.handler.on_send_leave_request(origin, pdu)
        defer.returnValue((200, {}))

    @defer.inlineCallbacks
    def on_event_auth(self, origin, room_id, event_id):
        with (yield self._server_linearizer.queue((origin, room_id))):
            time_now = self._clock.time_msec()
            auth_pdus = yield self.handler.on_event_auth(event_id)
            res = {
                "auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus],
            }
        defer.returnValue((200, res))

    @defer.inlineCallbacks
    def on_query_auth_request(self, origin, content, room_id, event_id):
        """
        Content is a dict with keys::
            auth_chain (list): A list of events that give the auth chain.
            missing (list): A list of event_ids indicating what the other
              side (`origin`) thinks we're missing.
            rejects (dict): A mapping from event_id to a 2-tuple of reason
              string and a proof (or None) of why the event was rejected.
              The keys of this dict give the list of events the `origin` has
              rejected.

        Args:
            origin (str)
            content (dict)
            event_id (str)

        Returns:
            Deferred: Results in `dict` with the same format as `content`
        """
        with (yield self._server_linearizer.queue((origin, room_id))):
            auth_chain = [
                self.event_from_pdu_json(e) for e in content["auth_chain"]
            ]

            signed_auth = yield self._check_sigs_and_hash_and_fetch(
                origin, auth_chain, outlier=True)

            ret = yield self.handler.on_query_auth(
                origin,
                event_id,
                signed_auth,
                content.get("rejects", []),
                content.get("missing", []),
            )

            time_now = self._clock.time_msec()
            send_content = {
                "auth_chain":
                [e.get_pdu_json(time_now) for e in ret["auth_chain"]],
                "rejects": ret.get("rejects", []),
                "missing": ret.get("missing", []),
            }

        defer.returnValue((200, send_content))

    @log_function
    def on_query_client_keys(self, origin, content):
        return self.on_query_request("client_keys", content)

    @defer.inlineCallbacks
    @log_function
    def on_claim_client_keys(self, origin, content):
        query = []
        for user_id, device_keys in content.get("one_time_keys", {}).items():
            for device_id, algorithm in device_keys.items():
                query.append((user_id, device_id, algorithm))

        results = yield self.store.claim_e2e_one_time_keys(query)

        json_result = {}
        for user_id, device_keys in results.items():
            for device_id, keys in device_keys.items():
                for key_id, json_bytes in keys.items():
                    json_result.setdefault(user_id, {})[device_id] = {
                        key_id: json.loads(json_bytes)
                    }

        defer.returnValue({"one_time_keys": json_result})

    @defer.inlineCallbacks
    @log_function
    def on_get_missing_events(self, origin, room_id, earliest_events,
                              latest_events, limit, min_depth):
        with (yield self._server_linearizer.queue((origin, room_id))):
            logger.info(
                "on_get_missing_events: earliest_events: %r, latest_events: %r,"
                " limit: %d, min_depth: %d", earliest_events, latest_events,
                limit, min_depth)

            missing_events = yield self.handler.on_get_missing_events(
                origin, room_id, earliest_events, latest_events, limit,
                min_depth)

            if len(missing_events) < 5:
                logger.info("Returning %d events: %r", len(missing_events),
                            missing_events)
            else:
                logger.info("Returning %d events", len(missing_events))

            time_now = self._clock.time_msec()

        defer.returnValue({
            "events": [ev.get_pdu_json(time_now) for ev in missing_events],
        })

    @log_function
    def on_openid_userinfo(self, token):
        ts_now_ms = self._clock.time_msec()
        return self.store.get_user_id_for_open_id_token(token, ts_now_ms)

    @log_function
    def _get_persisted_pdu(self, origin, event_id, do_auth=True):
        """ Get a PDU from the database with given origin and id.

        Returns:
            Deferred: Results in a `Pdu`.
        """
        return self.handler.get_persisted_pdu(origin,
                                              event_id,
                                              do_auth=do_auth)

    def _transaction_from_pdus(self, pdu_list):
        """Returns a new Transaction containing the given PDUs suitable for
        transmission.
        """
        time_now = self._clock.time_msec()
        pdus = [p.get_pdu_json(time_now) for p in pdu_list]
        return Transaction(
            origin=self.server_name,
            pdus=pdus,
            origin_server_ts=int(time_now),
            destination=None,
        )

    @defer.inlineCallbacks
    @log_function
    def _handle_new_pdu(self, origin, pdu, get_missing=True):

        # We reprocess pdus when we have seen them only as outliers
        existing = yield self._get_persisted_pdu(origin,
                                                 pdu.event_id,
                                                 do_auth=False)

        # FIXME: Currently we fetch an event again when we already have it
        # if it has been marked as an outlier.

        already_seen = (existing
                        and (not existing.internal_metadata.is_outlier()
                             or pdu.internal_metadata.is_outlier()))
        if already_seen:
            logger.debug("Already seen pdu %s", pdu.event_id)
            return

        # Check signature.
        try:
            pdu = yield self._check_sigs_and_hash(pdu)
        except SynapseError as e:
            raise FederationError(
                "ERROR",
                e.code,
                e.msg,
                affected=pdu.event_id,
            )

        state = None

        auth_chain = []

        have_seen = yield self.store.have_events(
            [ev for ev, _ in pdu.prev_events])

        fetch_state = False

        # Get missing pdus if necessary.
        if not pdu.internal_metadata.is_outlier():
            # We only backfill backwards to the min depth.
            min_depth = yield self.handler.get_min_depth_for_context(
                pdu.room_id)

            logger.debug("_handle_new_pdu min_depth for %s: %d", pdu.room_id,
                         min_depth)

            prevs = {e_id for e_id, _ in pdu.prev_events}
            seen = set(have_seen.keys())

            if min_depth and pdu.depth < min_depth:
                # This is so that we don't notify the user about this
                # message, to work around the fact that some events will
                # reference really really old events we really don't want to
                # send to the clients.
                pdu.internal_metadata.outlier = True
            elif min_depth and pdu.depth > min_depth:
                if get_missing and prevs - seen:
                    # If we're missing stuff, ensure we only fetch stuff one
                    # at a time.
                    logger.info(
                        "Acquiring lock for room %r to fetch %d missing events: %r...",
                        pdu.room_id,
                        len(prevs - seen),
                        list(prevs - seen)[:5],
                    )
                    with (yield self._room_pdu_linearizer.queue(pdu.room_id)):
                        logger.info(
                            "Acquired lock for room %r to fetch %d missing events",
                            pdu.room_id,
                            len(prevs - seen),
                        )

                        # We recalculate seen, since it may have changed.
                        have_seen = yield self.store.have_events(prevs)
                        seen = set(have_seen.keys())

                        if prevs - seen:
                            latest = yield self.store.get_latest_event_ids_in_room(
                                pdu.room_id)

                            # We add the prev events that we have seen to the latest
                            # list to ensure the remote server doesn't give them to us
                            latest = set(latest)
                            latest |= seen

                            logger.info("Missing %d events for room %r: %r...",
                                        len(prevs - seen), pdu.room_id,
                                        list(prevs - seen)[:5])

                            # XXX: we set timeout to 10s to help workaround
                            # https://github.com/matrix-org/synapse/issues/1733.
                            # The reason is to avoid holding the linearizer lock
                            # whilst processing inbound /send transactions, causing
                            # FDs to stack up and block other inbound transactions
                            # which empirically can currently take up to 30 minutes.
                            #
                            # N.B. this explicitly disables retry attempts.
                            #
                            # N.B. this also increases our chances of falling back to
                            # fetching fresh state for the room if the missing event
                            # can't be found, which slightly reduces our security.
                            # it may also increase our DAG extremity count for the room,
                            # causing additional state resolution?  See #1760.
                            # However, fetching state doesn't hold the linearizer lock
                            # apparently.
                            #
                            # see https://github.com/matrix-org/synapse/pull/1744

                            missing_events = yield self.get_missing_events(
                                origin,
                                pdu.room_id,
                                earliest_events_ids=list(latest),
                                latest_events=[pdu],
                                limit=10,
                                min_depth=min_depth,
                                timeout=10000,
                            )

                            # We want to sort these by depth so we process them and
                            # tell clients about them in order.
                            missing_events.sort(key=lambda x: x.depth)

                            for e in missing_events:
                                yield self._handle_new_pdu(origin,
                                                           e,
                                                           get_missing=False)

                            have_seen = yield self.store.have_events(
                                [ev for ev, _ in pdu.prev_events])

            prevs = {e_id for e_id, _ in pdu.prev_events}
            seen = set(have_seen.keys())
            if prevs - seen:
                logger.info("Still missing %d events for room %r: %r...",
                            len(prevs - seen), pdu.room_id,
                            list(prevs - seen)[:5])
                fetch_state = True

        if fetch_state:
            # We need to get the state at this event, since we haven't
            # processed all the prev events.
            logger.debug("_handle_new_pdu getting state for %s", pdu.room_id)
            try:
                state, auth_chain = yield self.get_state_for_room(
                    origin,
                    pdu.room_id,
                    pdu.event_id,
                )
            except Exception:
                logger.exception("Failed to get state for event: %s",
                                 pdu.event_id)

        yield self.handler.on_receive_pdu(
            origin,
            pdu,
            state=state,
            auth_chain=auth_chain,
        )
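
The backfill decision in _handle_new_pdu hinges on set arithmetic over event IDs. A worked example of the prevs - seen check (the event IDs are made up):

prevs = {"$event_a", "$event_b", "$event_c"}
seen = {"$event_a"}

missing = prevs - seen
# Two prev_events are unknown, so the handler would try
# get_missing_events first, and fall back to fetching full room
# state only if some are still missing afterwards.
assert missing == {"$event_b", "$event_c"}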

    def __str__(self):
        return "<ReplicationLayer(%s)>" % self.server_name

    def event_from_pdu_json(self, pdu_json, outlier=False):
        event = FrozenEvent(pdu_json)

        event.internal_metadata.outlier = outlier

        return event

    @defer.inlineCallbacks
    def exchange_third_party_invite(
        self,
        sender_user_id,
        target_user_id,
        room_id,
        signed,
    ):
        ret = yield self.handler.exchange_third_party_invite(
            sender_user_id,
            target_user_id,
            room_id,
            signed,
        )
        defer.returnValue(ret)

    @defer.inlineCallbacks
    def on_exchange_third_party_invite_request(self, origin, room_id,
                                               event_dict):
        ret = yield self.handler.on_exchange_third_party_invite_request(
            origin, room_id, event_dict)
        defer.returnValue(ret)
Example #10
class RoomListHandler(BaseHandler):
    def __init__(self, hs):
        super(RoomListHandler, self).__init__(hs)
        self.response_cache = ResponseCache(hs)
        self.remote_response_cache = ResponseCache(hs, timeout_ms=30 * 1000)

    def get_local_public_room_list(self, limit=None, since_token=None,
                                   search_filter=None):
        if search_filter:
            # We explicitly don't bother caching searches.
            return self._get_public_room_list(limit, since_token, search_filter)

        result = self.response_cache.get((limit, since_token))
        if not result:
            result = self.response_cache.set(
                (limit, since_token),
                self._get_public_room_list(limit, since_token)
            )
        return result

    @defer.inlineCallbacks
    def _get_public_room_list(self, limit=None, since_token=None,
                              search_filter=None):
        if since_token and since_token != "END":
            since_token = RoomListNextBatch.from_token(since_token)
        else:
            since_token = None

        rooms_to_order_value = {}
        rooms_to_num_joined = {}
        rooms_to_latest_event_ids = {}

        newly_visible = []
        newly_unpublished = []
        if since_token:
            stream_token = since_token.stream_ordering
            current_public_id = yield self.store.get_current_public_room_stream_id()
            public_room_stream_id = since_token.public_room_stream_id
            newly_visible, newly_unpublished = yield self.store.get_public_room_changes(
                public_room_stream_id, current_public_id
            )
        else:
            stream_token = yield self.store.get_room_max_stream_ordering()
            public_room_stream_id = yield self.store.get_current_public_room_stream_id()

        room_ids = yield self.store.get_public_room_ids_at_stream_id(
            public_room_stream_id
        )

        # We want to return rooms in a particular order: the number of joined
        # users. We then arbitrarily use the room_id as a tie breaker.

        @defer.inlineCallbacks
        def get_order_for_room(room_id):
            latest_event_ids = rooms_to_latest_event_ids.get(room_id, None)
            if not latest_event_ids:
                latest_event_ids = yield self.store.get_forward_extremeties_for_room(
                    room_id, stream_token
                )
                rooms_to_latest_event_ids[room_id] = latest_event_ids

            if not latest_event_ids:
                return

            joined_users = yield self.state_handler.get_current_user_in_room(
                room_id, latest_event_ids,
            )
            num_joined_users = len(joined_users)
            rooms_to_num_joined[room_id] = num_joined_users

            if num_joined_users == 0:
                return

            # We want larger rooms to be first, hence negating num_joined_users
            rooms_to_order_value[room_id] = (-num_joined_users, room_id)

        yield concurrently_execute(get_order_for_room, room_ids, 10)

        sorted_entries = sorted(rooms_to_order_value.items(), key=lambda e: e[1])
        sorted_rooms = [room_id for room_id, _ in sorted_entries]

        # `sorted_rooms` should now be a list of all public room ids that is
        # stable across pagination. Therefore, we can use indices into this
        # list as our pagination tokens.

        # Filter out rooms that we don't want to return
        rooms_to_scan = [
            r for r in sorted_rooms
            if r not in newly_unpublished and rooms_to_num_joined[r] > 0
        ]

        if since_token:
            # Filter out rooms we've already returned previously
            # `since_token.current_limit` is the index of the last room we
            # sent down, so we exclude it and everything before it (or
            # after it, when paginating backwards).
            if since_token.direction_is_forward:
                rooms_to_scan = rooms_to_scan[since_token.current_limit + 1:]
            else:
                rooms_to_scan = rooms_to_scan[:since_token.current_limit]
                rooms_to_scan.reverse()

        # Actually generate the entries. _generate_room_entry will append to
        # chunk but will stop if len(chunk) > limit
        chunk = []
        if limit and not search_filter:
            step = limit + 1
            for i in xrange(0, len(rooms_to_scan), step):
                # We iterate here because in the vast majority of cases
                # we'll stop at the first iteration, but occasionally
                # _generate_room_entry won't append to the chunk and so we
                # need to loop again.
                # We don't want to scan over the entire range either as that
                # would potentially waste a lot of work.
                yield concurrently_execute(
                    lambda r: self._generate_room_entry(
                        r, rooms_to_num_joined[r],
                        chunk, limit, search_filter
                    ),
                    rooms_to_scan[i:i + step], 10
                )
                if len(chunk) >= limit + 1:
                    break
        else:
            yield concurrently_execute(
                lambda r: self._generate_room_entry(
                    r, rooms_to_num_joined[r],
                    chunk, limit, search_filter
                ),
                rooms_to_scan, 5
            )

        chunk.sort(key=lambda e: (-e["num_joined_members"], e["room_id"]))

        # Work out the new limit of the batch for pagination, or None if we
        # know there are no more results that would be returned.
        # i.e., [since_token.current_limit..new_limit] is the batch of rooms
        # we've returned (or the reverse if we paginated backwards)
        # We tried to pull out limit + 1 rooms above, so if we have <= limit
        # then we know there are no more results to return
        new_limit = None
        if chunk and (not limit or len(chunk) > limit):

            if not since_token or since_token.direction_is_forward:
                if limit:
                    chunk = chunk[:limit]
                last_room_id = chunk[-1]["room_id"]
            else:
                if limit:
                    chunk = chunk[-limit:]
                last_room_id = chunk[0]["room_id"]

            new_limit = sorted_rooms.index(last_room_id)

        results = {
            "chunk": chunk,
        }

        if since_token:
            results["new_rooms"] = bool(newly_visible)

        if not since_token or since_token.direction_is_forward:
            if new_limit is not None:
                results["next_batch"] = RoomListNextBatch(
                    stream_ordering=stream_token,
                    public_room_stream_id=public_room_stream_id,
                    current_limit=new_limit,
                    direction_is_forward=True,
                ).to_token()

            if since_token:
                results["prev_batch"] = since_token.copy_and_replace(
                    direction_is_forward=False,
                    current_limit=since_token.current_limit + 1,
                ).to_token()
        else:
            if new_limit is not None:
                results["prev_batch"] = RoomListNextBatch(
                    stream_ordering=stream_token,
                    public_room_stream_id=public_room_stream_id,
                    current_limit=new_limit,
                    direction_is_forward=False,
                ).to_token()

            if since_token:
                results["next_batch"] = since_token.copy_and_replace(
                    direction_is_forward=True,
                    current_limit=since_token.current_limit - 1,
                ).to_token()

        defer.returnValue(results)

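The pagination above relies on RoomListNextBatch, which is not part of this
snippet. Below is a minimal sketch of how such a token could round-trip,
assuming a namedtuple serialized as URL-safe base64 JSON; the actual wire
format used by Synapse may differ.

import base64
import json
from collections import namedtuple

class RoomListNextBatch(namedtuple("RoomListNextBatch", (
    "stream_ordering",        # stream position the list was generated at
    "public_room_stream_id",  # public room stream position at generation
    "current_limit",          # index of the last room sent, into sorted_rooms
    "direction_is_forward",   # True when paginating forwards
))):

    def to_token(self):
        # Pack the fields as URL-safe base64 JSON.
        return base64.urlsafe_b64encode(
            json.dumps(self._asdict()).encode("ascii")
        ).decode("ascii")

    @classmethod
    def from_token(cls, token):
        return cls(**json.loads(
            base64.urlsafe_b64decode(token.encode("ascii")).decode("ascii")
        ))

    def copy_and_replace(self, **kwargs):
        return self._replace(**kwargs)
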
    @defer.inlineCallbacks
    def _generate_room_entry(self, room_id, num_joined_users, chunk, limit,
                             search_filter):
        if limit and len(chunk) > limit + 1:
            # We've already got enough, so let's just drop it.
            return

        result = {
            "room_id": room_id,
            "num_joined_members": num_joined_users,
        }

        current_state_ids = yield self.state_handler.get_current_state_ids(room_id)

        event_map = yield self.store.get_events([
            event_id for key, event_id in current_state_ids.items()
            if key[0] in (
                EventTypes.JoinRules,
                EventTypes.Name,
                EventTypes.Topic,
                EventTypes.CanonicalAlias,
                EventTypes.RoomHistoryVisibility,
                EventTypes.GuestAccess,
                "m.room.avatar",
            )
        ])

        current_state = {
            (ev.type, ev.state_key): ev
            for ev in event_map.values()
        }

        # Double check that this is actually a public room.
        join_rules_event = current_state.get((EventTypes.JoinRules, ""))
        if join_rules_event:
            join_rule = join_rules_event.content.get("join_rule", None)
            if join_rule and join_rule != JoinRules.PUBLIC:
                defer.returnValue(None)

        aliases = yield self.store.get_aliases_for_room(room_id)
        if aliases:
            result["aliases"] = aliases

        name_event = current_state.get((EventTypes.Name, ""))
        if name_event:
            name = name_event.content.get("name", None)
            if name:
                result["name"] = name

        topic_event = current_state.get((EventTypes.Topic, ""))
        if topic_event:
            topic = topic_event.content.get("topic", None)
            if topic:
                result["topic"] = topic

        canonical_event = current_state.get((EventTypes.CanonicalAlias, ""))
        if canonical_event:
            canonical_alias = canonical_event.content.get("alias", None)
            if canonical_alias:
                result["canonical_alias"] = canonical_alias

        visibility_event = current_state.get((EventTypes.RoomHistoryVisibility, ""))
        visibility = None
        if visibility_event:
            visibility = visibility_event.content.get("history_visibility", None)
        result["world_readable"] = visibility == "world_readable"

        guest_event = current_state.get((EventTypes.GuestAccess, ""))
        guest = None
        if guest_event:
            guest = guest_event.content.get("guest_access", None)
        result["guest_can_join"] = guest == "can_join"

        avatar_event = current_state.get(("m.room.avatar", ""))
        if avatar_event:
            avatar_url = avatar_event.content.get("url", None)
            if avatar_url:
                result["avatar_url"] = avatar_url

        if _matches_room_entry(result, search_filter):
            chunk.append(result)

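_matches_room_entry is referenced here but not included in the snippet. A
plausible sketch, assuming the search filter carries a generic_search_term
key matched case-insensitively against the room's name, topic and canonical
alias:

def _matches_room_entry(room_entry, search_filter):
    if search_filter and search_filter.get("generic_search_term", None):
        generic_search_term = search_filter["generic_search_term"].upper()
        # Substring match against the human-readable fields.
        if generic_search_term in room_entry.get("name", "").upper():
            return True
        elif generic_search_term in room_entry.get("topic", "").upper():
            return True
        elif generic_search_term in room_entry.get("canonical_alias", "").upper():
            return True
    else:
        # No filter: everything matches.
        return True

    return False
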
    @defer.inlineCallbacks
    def get_remote_public_room_list(self, server_name, limit=None, since_token=None,
                                    search_filter=None):
        if search_filter:
            # We currently don't support searching across federation, so we have
            # to do it manually without pagination
            limit = None
            since_token = None

        res = yield self._get_remote_list_cached(
            server_name, limit=limit, since_token=since_token,
        )

        if search_filter:
            res = {"chunk": [
                entry
                for entry in list(res.get("chunk", []))
                if _matches_room_entry(entry, search_filter)
            ]}

        defer.returnValue(res)

    def _get_remote_list_cached(self, server_name, limit=None, since_token=None,
                                search_filter=None):
        repl_layer = self.hs.get_replication_layer()
        if search_filter:
            # We can't cache when asking for search
            return repl_layer.get_public_rooms(
                server_name, limit=limit, since_token=since_token,
                search_filter=search_filter,
            )

        result = self.remote_response_cache.get((server_name, limit, since_token))
        if not result:
            result = self.remote_response_cache.set(
                (server_name, limit, since_token),
                repl_layer.get_public_rooms(
                    server_name, limit=limit, since_token=since_token,
                    search_filter=search_filter,
                )
            )
        return result
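
Both the local and remote room-list paths use the same get-or-set idiom on
ResponseCache: a cache miss kicks off the computation and stores the
deferred, so concurrent callers asking for the same key share one request.
Below is a minimal sketch of that idea; the real class additionally wraps
results so that several callers can safely observe a single deferred, a
detail elided here.

class ResponseCache(object):
    def __init__(self, hs, timeout_ms=0):
        self._entries = {}                  # key -> deferred result
        self._clock = hs.get_clock()
        self._timeout_sec = timeout_ms / 1000.0

    def get(self, key):
        # Returns the cached deferred, or None on a miss.
        return self._entries.get(key)

    def set(self, key, deferred):
        self._entries[key] = deferred

        def evict(result):
            # Keep completed results for the timeout window, or drop
            # them immediately if no timeout was configured.
            if self._timeout_sec:
                self._clock.call_later(
                    self._timeout_sec, self._entries.pop, key, None,
                )
            else:
                self._entries.pop(key, None)
            return result

        deferred.addBoth(evict)
        return deferred
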
Example #11
class ReplicationSendEventRestServlet(RestServlet):
    """Handles events newly created on workers, including persisting and
    notifying.

    The API looks like:

        PUT /_synapse/replication/send_event/:event_id

        {
            "event": { .. serialized event .. },
            "internal_metadata": { .. serialized internal_metadata .. },
            "rejected_reason": ..,   // The event.rejected_reason field
            "context": { .. serialized event context .. },
            "requester": { .. serialized requester .. },
            "ratelimit": true,
            "extra_users": [],
        }
    """
    PATTERNS = [
        re.compile("^/_synapse/replication/send_event/(?P<event_id>[^/]+)$")
    ]

    def __init__(self, hs):
        super(ReplicationSendEventRestServlet, self).__init__()

        self.event_creation_handler = hs.get_event_creation_handler()
        self.store = hs.get_datastore()
        self.clock = hs.get_clock()

        # The responses are tiny, so we may as well cache them for a while
        self.response_cache = ResponseCache(hs, timeout_ms=30 * 60 * 1000)

    def on_PUT(self, request, event_id):
        result = self.response_cache.get(event_id)
        if not result:
            result = self.response_cache.set(event_id,
                                             self._handle_request(request))
        else:
            logger.warn("Returning cached response")
        return make_deferred_yieldable(result)

    @preserve_fn
    @defer.inlineCallbacks
    def _handle_request(self, request):
        with Measure(self.clock, "repl_send_event_parse"):
            content = parse_json_object_from_request(request)

            event_dict = content["event"]
            internal_metadata = content["internal_metadata"]
            rejected_reason = content["rejected_reason"]
            event = FrozenEvent(event_dict, internal_metadata, rejected_reason)

            requester = Requester.deserialize(self.store, content["requester"])
            context = yield EventContext.deserialize(self.store,
                                                     content["context"])

            ratelimit = content["ratelimit"]
            extra_users = [
                UserID.from_string(u) for u in content["extra_users"]
            ]

        if requester.user:
            request.authenticated_entity = requester.user.to_string()

        logger.info(
            "Got event to send with ID: %s into room: %s",
            event.event_id,
            event.room_id,
        )

        yield self.event_creation_handler.persist_and_notify_client_event(
            requester,
            event,
            context,
            ratelimit=ratelimit,
            extra_users=extra_users,
        )

        defer.returnValue((200, {}))
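
For context, a worker would hit this endpoint by serializing the event, its
context and the requester, then PUTing the payload to the master. The helper
below is hypothetical: it assumes an HTTP client exposing put_json(uri,
payload) and the serialization methods shown, so the names are illustrative
rather than the exact Synapse API.

from twisted.internet import defer

@defer.inlineCallbacks
def send_event_to_master(client, host, port, requester, event, context,
                         ratelimit, extra_users):
    # Mirrors the fields that _handle_request parses on the master.
    payload = {
        "event": event.get_pdu_json(),
        "internal_metadata": event.internal_metadata.get_dict(),
        "rejected_reason": event.rejected_reason,
        "context": (yield context.serialize(event)),
        "requester": requester.serialize(),
        "ratelimit": ratelimit,
        "extra_users": [u.to_string() for u in extra_users],
    }

    uri = "http://%s:%s/_synapse/replication/send_event/%s" % (
        host, port, event.event_id,
    )
    result = yield client.put_json(uri, payload)
    defer.returnValue(result)
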
class FederationServer(FederationBase):
    def __init__(self, hs):
        super(FederationServer, self).__init__(hs)

        self.auth = hs.get_auth()

        self._room_pdu_linearizer = Linearizer()
        self._server_linearizer = Linearizer()

        # We cache responses to state queries, as they take a while and often
        # come in waves.
        self._state_resp_cache = ResponseCache(hs, timeout_ms=30000)

    def set_handler(self, handler):
        """Sets the handler that the replication layer will use to communicate
        receipt of new PDUs from other homeservers. The required methods are
        documented on :py:class:`.ReplicationHandler`.
        """
        self.handler = handler

    def register_edu_handler(self, edu_type, handler):
        if edu_type in self.edu_handlers:
            raise KeyError("Already have an EDU handler for %s" % (edu_type,))

        self.edu_handlers[edu_type] = handler

    def register_query_handler(self, query_type, handler):
        """Sets the handler callable that will be used to handle an incoming
        federation Query of the given type.

        Args:
            query_type (str): Category name of the query, which should match
                the string used by make_query.
            handler (callable): Invoked to handle incoming queries of this type

        handler is invoked as:
            result = handler(args)

        where 'args' is a dict mapping strings to strings of the query
        arguments. It should return a Deferred that will eventually yield an
        object to encode as JSON.
        """
        if query_type in self.query_handlers:
            raise KeyError(
                "Already have a Query handler for %s" % (query_type,)
            )

        self.query_handlers[query_type] = handler

    @defer.inlineCallbacks
    @log_function
    def on_backfill_request(self, origin, room_id, versions, limit):
        with (yield self._server_linearizer.queue((origin, room_id))):
            pdus = yield self.handler.on_backfill_request(
                origin, room_id, versions, limit
            )

            res = self._transaction_from_pdus(pdus).get_dict()

        defer.returnValue((200, res))

    @defer.inlineCallbacks
    @log_function
    def on_incoming_transaction(self, transaction_data):
        transaction = Transaction(**transaction_data)

        received_pdus_counter.inc_by(len(transaction.pdus))

        for p in transaction.pdus:
            if "unsigned" in p:
                unsigned = p["unsigned"]
                if "age" in unsigned:
                    p["age"] = unsigned["age"]
            if "age" in p:
                p["age_ts"] = int(self._clock.time_msec()) - int(p["age"])
                del p["age"]

        pdu_list = [
            self.event_from_pdu_json(p) for p in transaction.pdus
        ]

        logger.debug("[%s] Got transaction", transaction.transaction_id)

        response = yield self.transaction_actions.have_responded(transaction)

        if response:
            logger.debug(
                "[%s] We've already responed to this request",
                transaction.transaction_id
            )
            defer.returnValue(response)
            return

        logger.debug("[%s] Transaction is new", transaction.transaction_id)

        results = []

        for pdu in pdu_list:
            try:
                yield self._handle_new_pdu(transaction.origin, pdu)
                results.append({})
            except FederationError as e:
                self.send_failure(e, transaction.origin)
                results.append({"error": str(e)})
            except Exception as e:
                results.append({"error": str(e)})
                logger.exception("Failed to handle PDU")

        if hasattr(transaction, "edus"):
            for edu in (Edu(**x) for x in transaction.edus):
                yield self.received_edu(
                    transaction.origin,
                    edu.edu_type,
                    edu.content
                )

        for failure in getattr(transaction, "pdu_failures", []):
            logger.info("Got failure %r", failure)

        logger.debug("Returning: %s", str(results))

        response = {
            "pdus": dict(zip(
                (p.event_id for p in pdu_list), results
            )),
        }

        yield self.transaction_actions.set_response(
            transaction,
            200, response
        )
        defer.returnValue((200, response))

    @defer.inlineCallbacks
    def received_edu(self, origin, edu_type, content):
        received_edus_counter.inc()

        if edu_type in self.edu_handlers:
            try:
                yield self.edu_handlers[edu_type](origin, content)
            except SynapseError as e:
                logger.info("Failed to handle edu %r: %r", edu_type, e)
            except Exception:
                logger.exception("Failed to handle edu %r", edu_type)
        else:
            logger.warn("Received EDU of type %s with no handler", edu_type)

    @defer.inlineCallbacks
    @log_function
    def on_context_state_request(self, origin, room_id, event_id):
        if not event_id:
            raise NotImplementedError("Specify an event")

        in_room = yield self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")

        result = self._state_resp_cache.get((room_id, event_id))
        if not result:
            with (yield self._server_linearizer.queue((origin, room_id))):
                resp = yield self._state_resp_cache.set(
                    (room_id, event_id),
                    self._on_context_state_request_compute(room_id, event_id)
                )
        else:
            resp = yield result

        defer.returnValue((200, resp))

    @defer.inlineCallbacks
    def on_state_ids_request(self, origin, room_id, event_id):
        if not event_id:
            raise NotImplementedError("Specify an event")

        in_room = yield self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")

        pdus = yield self.handler.get_state_for_pdu(
            room_id, event_id,
        )
        auth_chain = yield self.store.get_auth_chain(
            [pdu.event_id for pdu in pdus]
        )

        defer.returnValue((200, {
            "pdu_ids": [pdu.event_id for pdu in pdus],
            "auth_chain_ids": [pdu.event_id for pdu in auth_chain],
        }))

    @defer.inlineCallbacks
    def _on_context_state_request_compute(self, room_id, event_id):
        pdus = yield self.handler.get_state_for_pdu(
            room_id, event_id,
        )
        auth_chain = yield self.store.get_auth_chain(
            [pdu.event_id for pdu in pdus]
        )

        for event in auth_chain:
            # We sign these again because there was a bug where we
            # incorrectly signed things the first time round
            if self.hs.is_mine_id(event.event_id):
                event.signatures.update(
                    compute_event_signature(
                        event,
                        self.hs.hostname,
                        self.hs.config.signing_key[0]
                    )
                )

        defer.returnValue({
            "pdus": [pdu.get_pdu_json() for pdu in pdus],
            "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
        })

    @defer.inlineCallbacks
    @log_function
    def on_pdu_request(self, origin, event_id):
        pdu = yield self._get_persisted_pdu(origin, event_id)

        if pdu:
            defer.returnValue(
                (200, self._transaction_from_pdus([pdu]).get_dict())
            )
        else:
            defer.returnValue((404, ""))

    @defer.inlineCallbacks
    @log_function
    def on_pull_request(self, origin, versions):
        raise NotImplementedError("Pull transactions not implemented")

    @defer.inlineCallbacks
    def on_query_request(self, query_type, args):
        received_queries_counter.inc(query_type)

        if query_type in self.query_handlers:
            response = yield self.query_handlers[query_type](args)
            defer.returnValue((200, response))
        else:
            defer.returnValue(
                (404, "No handler for Query type '%s'" % (query_type,))
            )

    @defer.inlineCallbacks
    def on_make_join_request(self, room_id, user_id):
        pdu = yield self.handler.on_make_join_request(room_id, user_id)
        time_now = self._clock.time_msec()
        defer.returnValue({"event": pdu.get_pdu_json(time_now)})

    @defer.inlineCallbacks
    def on_invite_request(self, origin, content):
        pdu = self.event_from_pdu_json(content)
        ret_pdu = yield self.handler.on_invite_request(origin, pdu)
        time_now = self._clock.time_msec()
        defer.returnValue((200, {"event": ret_pdu.get_pdu_json(time_now)}))

    @defer.inlineCallbacks
    def on_send_join_request(self, origin, content):
        logger.debug("on_send_join_request: content: %s", content)
        pdu = self.event_from_pdu_json(content)
        logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)
        res_pdus = yield self.handler.on_send_join_request(origin, pdu)
        time_now = self._clock.time_msec()
        defer.returnValue((200, {
            "state": [p.get_pdu_json(time_now) for p in res_pdus["state"]],
            "auth_chain": [
                p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]
            ],
        }))

    @defer.inlineCallbacks
    def on_make_leave_request(self, room_id, user_id):
        pdu = yield self.handler.on_make_leave_request(room_id, user_id)
        time_now = self._clock.time_msec()
        defer.returnValue({"event": pdu.get_pdu_json(time_now)})

    @defer.inlineCallbacks
    def on_send_leave_request(self, origin, content):
        logger.debug("on_send_leave_request: content: %s", content)
        pdu = self.event_from_pdu_json(content)
        logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)
        yield self.handler.on_send_leave_request(origin, pdu)
        defer.returnValue((200, {}))

    @defer.inlineCallbacks
    def on_event_auth(self, origin, room_id, event_id):
        with (yield self._server_linearizer.queue((origin, room_id))):
            time_now = self._clock.time_msec()
            auth_pdus = yield self.handler.on_event_auth(event_id)
            res = {
                "auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus],
            }
        defer.returnValue((200, res))

    @defer.inlineCallbacks
    def on_query_auth_request(self, origin, content, room_id, event_id):
        """
        Content is a dict with keys::
            auth_chain (list): A list of events that give the auth chain.
            missing (list): A list of event_ids indicating what the other
              side (`origin`) think we're missing.
            rejects (dict): A mapping from event_id to a 2-tuple of reason
              string and a proof (or None) of why the event was rejected.
              The keys of this dict give the list of events the `origin` has
              rejected.

        Args:
            origin (str)
            content (dict)
            room_id (str)
            event_id (str)

        Returns:
            Deferred: Results in `dict` with the same format as `content`
        """
        with (yield self._server_linearizer.queue((origin, room_id))):
            auth_chain = [
                self.event_from_pdu_json(e)
                for e in content["auth_chain"]
            ]

            signed_auth = yield self._check_sigs_and_hash_and_fetch(
                origin, auth_chain, outlier=True
            )

            ret = yield self.handler.on_query_auth(
                origin,
                event_id,
                signed_auth,
                content.get("rejects", []),
                content.get("missing", []),
            )

            time_now = self._clock.time_msec()
            send_content = {
                "auth_chain": [
                    e.get_pdu_json(time_now)
                    for e in ret["auth_chain"]
                ],
                "rejects": ret.get("rejects", []),
                "missing": ret.get("missing", []),
            }

        defer.returnValue(
            (200, send_content)
        )

    @log_function
    def on_query_client_keys(self, origin, content):
        return self.on_query_request("client_keys", content)

    @defer.inlineCallbacks
    @log_function
    def on_claim_client_keys(self, origin, content):
        query = []
        for user_id, device_keys in content.get("one_time_keys", {}).items():
            for device_id, algorithm in device_keys.items():
                query.append((user_id, device_id, algorithm))

        results = yield self.store.claim_e2e_one_time_keys(query)

        json_result = {}
        for user_id, device_keys in results.items():
            for device_id, keys in device_keys.items():
                for key_id, json_bytes in keys.items():
                    json_result.setdefault(user_id, {})[device_id] = {
                        key_id: json.loads(json_bytes)
                    }

        defer.returnValue({"one_time_keys": json_result})

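To make the reshaping above concrete: the store hands back one-time keys as
JSON strings, and the nested loops parse each into the response structure. A
small worked example with hypothetical values:

import json

# Hypothetical store result: user -> device -> key_id -> JSON string.
results = {
    "@alice:example.com": {
        "DEVICE1": {
            "signed_curve25519:AAAAAA": '{"key": "base64+public+key"}',
        },
    },
}

json_result = {}
for user_id, device_keys in results.items():
    for device_id, keys in device_keys.items():
        for key_id, json_bytes in keys.items():
            json_result.setdefault(user_id, {})[device_id] = {
                key_id: json.loads(json_bytes)
            }

# json_result now contains parsed objects:
# {"@alice:example.com":
#     {"DEVICE1": {"signed_curve25519:AAAAAA": {"key": "base64+public+key"}}}}
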
    @defer.inlineCallbacks
    @log_function
    def on_get_missing_events(self, origin, room_id, earliest_events,
                              latest_events, limit, min_depth):
        with (yield self._server_linearizer.queue((origin, room_id))):
            logger.info(
                "on_get_missing_events: earliest_events: %r, latest_events: %r,"
                " limit: %d, min_depth: %d",
                earliest_events, latest_events, limit, min_depth
            )
            missing_events = yield self.handler.on_get_missing_events(
                origin, room_id, earliest_events, latest_events, limit, min_depth
            )

            if len(missing_events) < 5:
                logger.info(
                    "Returning %d events: %r", len(missing_events), missing_events
                )
            else:
                logger.info("Returning %d events", len(missing_events))

            time_now = self._clock.time_msec()

        defer.returnValue({
            "events": [ev.get_pdu_json(time_now) for ev in missing_events],
        })

    @log_function
    def on_openid_userinfo(self, token):
        ts_now_ms = self._clock.time_msec()
        return self.store.get_user_id_for_open_id_token(token, ts_now_ms)

    @log_function
    def _get_persisted_pdu(self, origin, event_id, do_auth=True):
        """ Get a PDU from the database with given origin and id.

        Returns:
            Deferred: Results in a `Pdu`.
        """
        return self.handler.get_persisted_pdu(
            origin, event_id, do_auth=do_auth
        )

    def _transaction_from_pdus(self, pdu_list):
        """Returns a new Transaction containing the given PDUs suitable for
        transmission.
        """
        time_now = self._clock.time_msec()
        pdus = [p.get_pdu_json(time_now) for p in pdu_list]
        return Transaction(
            origin=self.server_name,
            pdus=pdus,
            origin_server_ts=int(time_now),
            destination=None,
        )

    @defer.inlineCallbacks
    @log_function
    def _handle_new_pdu(self, origin, pdu, get_missing=True):
        # We reprocess pdus when we have seen them only as outliers
        existing = yield self._get_persisted_pdu(
            origin, pdu.event_id, do_auth=False
        )

        # FIXME: Currently we fetch an event again when we already have it
        # if it has been marked as an outlier.

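        # "Already seen" means we hold a full (non-outlier) copy of the
        # event, or the incoming copy is itself an outlier, so it adds
        # nothing new.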
        already_seen = (
            existing and (
                not existing.internal_metadata.is_outlier()
                or pdu.internal_metadata.is_outlier()
            )
        )
        if already_seen:
            logger.debug("Already seen pdu %s", pdu.event_id)
            return

        # Check signature.
        try:
            pdu = yield self._check_sigs_and_hash(pdu)
        except SynapseError as e:
            raise FederationError(
                "ERROR",
                e.code,
                e.msg,
                affected=pdu.event_id,
            )

        state = None

        auth_chain = []

        have_seen = yield self.store.have_events(
            [ev for ev, _ in pdu.prev_events]
        )

        fetch_state = False

        # Get missing pdus if necessary.
        if not pdu.internal_metadata.is_outlier():
            # We only backfill backwards to the min depth.
            min_depth = yield self.handler.get_min_depth_for_context(
                pdu.room_id
            )

            logger.debug(
                "_handle_new_pdu min_depth for %s: %d",
                pdu.room_id, min_depth
            )

            prevs = {e_id for e_id, _ in pdu.prev_events}
            seen = set(have_seen.keys())

            if min_depth and pdu.depth < min_depth:
                # This is so that we don't notify the user about this
                # message, to work around the fact that some events will
                # reference really really old events we really don't want to
                # send to the clients.
                pdu.internal_metadata.outlier = True
            elif min_depth and pdu.depth > min_depth:
                if get_missing and prevs - seen:
                    # If we're missing stuff, ensure we only fetch stuff one
                    # at a time.
                    with (yield self._room_pdu_linearizer.queue(pdu.room_id)):
                        # We recalculate seen, since it may have changed.
                        have_seen = yield self.store.have_events(prevs)
                        seen = set(have_seen.keys())

                        if prevs - seen:
                            latest = yield self.store.get_latest_event_ids_in_room(
                                pdu.room_id
                            )

                            # We add the prev events that we have seen to the latest
                            # list to ensure the remote server doesn't give them to us
                            latest = set(latest)
                            latest |= seen

                            logger.info(
                                "Missing %d events for room %r: %r...",
                                len(prevs - seen), pdu.room_id, list(prevs - seen)[:5]
                            )

                            missing_events = yield self.get_missing_events(
                                origin,
                                pdu.room_id,
                                earliest_events_ids=list(latest),
                                latest_events=[pdu],
                                limit=10,
                                min_depth=min_depth,
                            )

                            # We want to sort these by depth so we process them and
                            # tell clients about them in order.
                            missing_events.sort(key=lambda x: x.depth)

                            for e in missing_events:
                                yield self._handle_new_pdu(
                                    origin,
                                    e,
                                    get_missing=False
                                )

                            have_seen = yield self.store.have_events(
                                [ev for ev, _ in pdu.prev_events]
                            )

            prevs = {e_id for e_id, _ in pdu.prev_events}
            seen = set(have_seen.keys())
            if prevs - seen:
                logger.info(
                    "Still missing %d events for room %r: %r...",
                    len(prevs - seen), pdu.room_id, list(prevs - seen)[:5]
                )
                fetch_state = True

        if fetch_state:
            # We need to get the state at this event, since we haven't
            # processed all the prev events.
            logger.debug(
                "_handle_new_pdu getting state for %s",
                pdu.room_id
            )
            try:
                state, auth_chain = yield self.get_state_for_room(
                    origin, pdu.room_id, pdu.event_id,
                )
            except Exception:
                logger.exception("Failed to get state for event: %s", pdu.event_id)

        yield self.handler.on_receive_pdu(
            origin,
            pdu,
            state=state,
            auth_chain=auth_chain,
        )

    def __str__(self):
        return "<ReplicationLayer(%s)>" % self.server_name

    def event_from_pdu_json(self, pdu_json, outlier=False):
        event = FrozenEvent(pdu_json)

        event.internal_metadata.outlier = outlier

        return event

    @defer.inlineCallbacks
    def exchange_third_party_invite(
            self,
            sender_user_id,
            target_user_id,
            room_id,
            signed,
    ):
        ret = yield self.handler.exchange_third_party_invite(
            sender_user_id,
            target_user_id,
            room_id,
            signed,
        )
        defer.returnValue(ret)

    @defer.inlineCallbacks
    def on_exchange_third_party_invite_request(self, origin, room_id, event_dict):
        ret = yield self.handler.on_exchange_third_party_invite_request(
            origin, room_id, event_dict
        )
        defer.returnValue(ret)