Example No. 1
    def _start_pushers(self):
        """Start all the pushers

        Returns:
            Deferred
        """
        pushers = yield self.store.get_all_pushers()

        # Stagger starting up the pushers so we don't completely drown the
        # process on start up.
        yield concurrently_execute(self._start_pusher, pushers, 10)

        logger.info("Started pushers")
Example No. 2
    def _start_pushers(self):
        """Start all the pushers

        Returns:
            Deferred
        """
        pushers = yield self.store.get_all_pushers()
        logger.info("Starting %d pushers", len(pushers))

        # Stagger starting up the pushers so we don't completely drown the
        # process on start up.
        yield concurrently_execute(self._start_pusher, pushers, 10)

        logger.info("Started pushers")
Example No. 3
    def test_limits_runners(self):
        """If we have more tasks than runners, we should get the limit of runners"""
        started = 0
        waiters = []
        processed = []

        async def callback(v):
            # when we first enter, bump the start count
            nonlocal started
            started += 1

            # record the fact we got an item
            processed.append(v)

            # wait for the goahead before returning
            d2 = Deferred()
            waiters.append(d2)
            await d2

        # set it going
        d2 = ensureDeferred(concurrently_execute(callback, [1, 2, 3, 4, 5], 3))

        # check that exactly 3 tasks were started
        self.assertEqual(started, 3)
        self.assertEqual(len(waiters), 3)

        # let one finish
        waiters.pop().callback(0)

        # ... which should start another
        self.assertEqual(started, 4)
        self.assertEqual(len(waiters), 3)

        # we still shouldn't be done
        self.assertNoResult(d2)

        # finish the job
        while waiters:
            waiters.pop().callback(0)

        # check everything got done
        self.assertEqual(started, 5)
        self.assertCountEqual(processed, [1, 2, 3, 4, 5])
        self.successResultOf(d2)
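
The test pins down the contract: three tasks start immediately, completing one lets the next begin, and the overall Deferred only fires once everything is done. The same limiting idea, detached from Twisted, can be sketched with a standard-library asyncio.Semaphore (only the names below are illustrative):

    import asyncio

    async def concurrently_execute_aio(func, args, limit):
        sem = asyncio.Semaphore(limit)  # caps how many calls run at once

        async def _run(arg):
            async with sem:
                await func(arg)

        await asyncio.gather(*(_run(a) for a in args))

    async def job(n):
        await asyncio.sleep(0.01)  # stand-in for real work

    # Run five jobs, at most three at a time.
    asyncio.run(concurrently_execute_aio(job, [1, 2, 3, 4, 5], 3))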
Example No. 4
    def _handle_incoming_transaction(self, origin, transaction, request_time):
        """ Process an incoming transaction and return the HTTP response

        Args:
            origin (unicode): the server making the request
            transaction (Transaction): incoming transaction
            request_time (int): timestamp that the HTTP request arrived at

        Returns:
            Deferred[(int, object)]: http response code and body
        """
        response = yield self.transaction_actions.have_responded(
            origin, transaction)

        if response:
            logger.debug("[%s] We've already responded to this request",
                         transaction.transaction_id)
            defer.returnValue(response)
            return

        logger.debug("[%s] Transaction is new", transaction.transaction_id)

        received_pdus_counter.inc(len(transaction.pdus))

        origin_host, _ = parse_server_name(origin)

        pdus_by_room = {}

        for p in transaction.pdus:
            if "unsigned" in p:
                unsigned = p["unsigned"]
                if "age" in unsigned:
                    p["age"] = unsigned["age"]
            if "age" in p:
                p["age_ts"] = request_time - int(p["age"])
                del p["age"]

            event = event_from_pdu_json(p)
            room_id = event.room_id
            pdus_by_room.setdefault(room_id, []).append(event)

        pdu_results = {}

        # we can process different rooms in parallel (which is useful if they
        # require callouts to other servers to fetch missing events), but
        # impose a limit to avoid going too crazy with ram/cpu.

        @defer.inlineCallbacks
        def process_pdus_for_room(room_id):
            logger.debug("Processing PDUs for %s", room_id)
            try:
                yield self.check_server_matches_acl(origin_host, room_id)
            except AuthError as e:
                logger.warn(
                    "Ignoring PDUs for room %s from banned server",
                    room_id,
                )
                for pdu in pdus_by_room[room_id]:
                    event_id = pdu.event_id
                    pdu_results[event_id] = e.error_dict()
                return

            for pdu in pdus_by_room[room_id]:
                event_id = pdu.event_id
                with nested_logging_context(event_id):
                    try:
                        yield self._handle_received_pdu(origin, pdu)
                        pdu_results[event_id] = {}
                    except FederationError as e:
                        logger.warn("Error handling PDU %s: %s", event_id, e)
                        pdu_results[event_id] = {"error": str(e)}
                    except Exception as e:
                        f = failure.Failure()
                        pdu_results[event_id] = {"error": str(e)}
                        logger.error(
                            "Failed to handle PDU %s: %s",
                            event_id,
                            f.getTraceback().rstrip(),
                        )

        yield concurrently_execute(
            process_pdus_for_room,
            pdus_by_room.keys(),
            TRANSACTION_CONCURRENCY_LIMIT,
        )

        if hasattr(transaction, "edus"):
            for edu in (Edu(**x) for x in transaction.edus):
                yield self.received_edu(origin, edu.edu_type, edu.content)

        response = {
            "pdus": pdu_results,
        }

        logger.debug("Returning: %s", str(response))

        yield self.transaction_actions.set_response(origin, transaction, 200,
                                                    response)
        defer.returnValue((200, response))
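
One detail worth rehearsing from this example is the age/age_ts conversion at the top of the PDU loop: a sender-supplied relative age (in milliseconds) is folded into an absolute origin timestamp. A self-contained rerun of that logic, with made-up values:

    # Hypothetical PDU whose origin says it is 5000 ms old:
    p = {"event_id": "$abc", "unsigned": {"age": 5000}}
    request_time = 1_600_000_000_000  # ms timestamp of the HTTP request (made up)

    if "unsigned" in p:
        unsigned = p["unsigned"]
        if "age" in unsigned:
            p["age"] = unsigned["age"]
    if "age" in p:
        p["age_ts"] = request_time - int(p["age"])  # absolute origin time
        del p["age"]

    assert p["age_ts"] == 1_599_999_995_000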
Example No. 5
    def _snapshot_all_rooms(self,
                            user_id=None,
                            pagin_config=None,
                            as_client_event=True,
                            include_archived=False):

        memberships = [Membership.INVITE, Membership.JOIN]
        if include_archived:
            memberships.append(Membership.LEAVE)

        room_list = yield self.store.get_rooms_for_user_where_membership_is(
            user_id=user_id, membership_list=memberships)

        user = UserID.from_string(user_id)

        rooms_ret = []

        now_token = yield self.hs.get_event_sources().get_current_token()

        presence_stream = self.hs.get_event_sources().sources["presence"]
        pagination_config = PaginationConfig(from_token=now_token)
        presence, _ = yield presence_stream.get_pagination_rows(
            user, pagination_config.get_source_config("presence"), None)

        receipt_stream = self.hs.get_event_sources().sources["receipt"]
        receipt, _ = yield receipt_stream.get_pagination_rows(
            user, pagination_config.get_source_config("receipt"), None)

        tags_by_room = yield self.store.get_tags_for_user(user_id)

        account_data, account_data_by_room = (
            yield self.store.get_account_data_for_user(user_id))

        public_room_ids = yield self.store.get_public_room_ids()

        limit = pagin_config.limit
        if limit is None:
            limit = 10

        @defer.inlineCallbacks
        def handle_room(event):
            d = {
                "room_id": event.room_id,
                "membership": event.membership,
                "visibility": (
                    "public" if event.room_id in public_room_ids
                    else "private"
                ),
            }

            if event.membership == Membership.INVITE:
                time_now = self.clock.time_msec()
                d["inviter"] = event.sender

                invite_event = yield self.store.get_event(event.event_id)
                d["invite"] = yield self._event_serializer.serialize_event(
                    invite_event,
                    time_now,
                    as_client_event,
                )

            rooms_ret.append(d)

            if event.membership not in (Membership.JOIN, Membership.LEAVE):
                return

            try:
                if event.membership == Membership.JOIN:
                    room_end_token = now_token.room_key
                    deferred_room_state = run_in_background(
                        self.state_handler.get_current_state,
                        event.room_id,
                    )
                elif event.membership == Membership.LEAVE:
                    room_end_token = "s%d" % (event.stream_ordering, )
                    deferred_room_state = run_in_background(
                        self.store.get_state_for_events,
                        [event.event_id],
                    )
                    deferred_room_state.addCallback(
                        lambda states: states[event.event_id])

                (messages, token), current_state = yield make_deferred_yieldable(
                    defer.gatherResults(
                        [
                            run_in_background(
                                self.store.get_recent_events_for_room,
                                event.room_id,
                                limit=limit,
                                end_token=room_end_token,
                            ),
                            deferred_room_state,
                        ]
                    )
                ).addErrback(unwrapFirstError)

                messages = yield filter_events_for_client(
                    self.store, user_id, messages)

                start_token = now_token.copy_and_replace("room_key", token)
                end_token = now_token.copy_and_replace("room_key",
                                                       room_end_token)
                time_now = self.clock.time_msec()

                d["messages"] = {
                    "chunk": (yield self._event_serializer.serialize_events(
                        messages,
                        time_now=time_now,
                        as_client_event=as_client_event,
                    )),
                    "start":
                    start_token.to_string(),
                    "end":
                    end_token.to_string(),
                }

                d["state"] = yield self._event_serializer.serialize_events(
                    current_state.values(),
                    time_now=time_now,
                    as_client_event=as_client_event)

                account_data_events = []
                tags = tags_by_room.get(event.room_id)
                if tags:
                    account_data_events.append({
                        "type": "m.tag",
                        "content": {
                            "tags": tags
                        },
                    })

                account_data = account_data_by_room.get(event.room_id, {})
                for account_data_type, content in account_data.items():
                    account_data_events.append({
                        "type": account_data_type,
                        "content": content,
                    })

                d["account_data"] = account_data_events
            except Exception:
                logger.exception("Failed to get snapshot")

        yield concurrently_execute(handle_room, room_list, 10)

        account_data_events = []
        for account_data_type, content in account_data.items():
            account_data_events.append({
                "type": account_data_type,
                "content": content,
            })

        now = self.clock.time_msec()

        ret = {
            "rooms": rooms_ret,
            "presence": [
                {
                    "type": "m.presence",
                    "content": format_user_presence_state(event, now),
                }
                for event in presence
            ],
            "account_data": account_data_events,
            "receipts": receipt,
            "end": now_token.to_string(),
        }

        defer.returnValue(ret)
Example No. 6
    def send_notification_mail(self, app_id, user_id, email_address,
                               push_actions, reason):
        try:
            from_string = self.hs.config.email_notif_from % {
                "app": self.app_name
            }
        except TypeError:
            from_string = self.hs.config.email_notif_from

        raw_from = email.utils.parseaddr(from_string)[1]
        raw_to = email.utils.parseaddr(email_address)[1]

        if raw_to == '':
            raise RuntimeError("Invalid 'to' address")

        rooms_in_order = deduped_ordered_list(
            [pa['room_id'] for pa in push_actions]
        )

        notif_events = yield self.store.get_events(
            [pa['event_id'] for pa in push_actions]
        )

        notifs_by_room = {}
        for pa in push_actions:
            notifs_by_room.setdefault(pa["room_id"], []).append(pa)

        # collect the current state for all the rooms in which we have
        # notifications
        state_by_room = {}

        try:
            user_display_name = yield self.store.get_profile_displayname(
                UserID.from_string(user_id).localpart
            )
            if user_display_name is None:
                user_display_name = user_id
        except StoreError:
            user_display_name = user_id

        @defer.inlineCallbacks
        def _fetch_room_state(room_id):
            room_state = yield self.store.get_current_state_ids(room_id)
            state_by_room[room_id] = room_state

        # Run at most 3 of these at once: sync does 10 at a time but email
        # notifs are much less realtime than sync so we can afford to wait a bit.
        yield concurrently_execute(_fetch_room_state, rooms_in_order, 3)

        # actually sort our so-called rooms_in_order list, most recent room first
        rooms_in_order.sort(
            key=lambda r: -(notifs_by_room[r][-1]['received_ts'] or 0)
        )

        rooms = []

        for r in rooms_in_order:
            roomvars = yield self.get_room_vars(
                r, user_id, notifs_by_room[r], notif_events, state_by_room[r]
            )
            rooms.append(roomvars)

        reason['room_name'] = yield calculate_room_name(
            self.store, state_by_room[reason['room_id']], user_id,
            fallback_to_members=True
        )

        summary_text = yield self.make_summary_text(
            notifs_by_room, state_by_room, notif_events, user_id, reason
        )

        template_vars = {
            "user_display_name": user_display_name,
            "unsubscribe_link": self.make_unsubscribe_link(
                user_id, app_id, email_address
            ),
            "summary_text": summary_text,
            "app_name": self.app_name,
            "rooms": rooms,
            "reason": reason,
        }

        html_text = self.notif_template_html.render(**template_vars)
        html_part = MIMEText(html_text, "html", "utf8")

        plain_text = self.notif_template_text.render(**template_vars)
        text_part = MIMEText(plain_text, "plain", "utf8")

        multipart_msg = MIMEMultipart('alternative')
        multipart_msg['Subject'] = "[%s] %s" % (self.app_name, summary_text)
        multipart_msg['From'] = from_string
        multipart_msg['To'] = email_address
        multipart_msg['Date'] = email.utils.formatdate()
        multipart_msg['Message-ID'] = email.utils.make_msgid()
        multipart_msg.attach(text_part)
        multipart_msg.attach(html_part)

        logger.info("Sending email push notification to %s" % email_address)
        # logger.debug(html_text)

        yield sendmail(
            self.hs.config.email_smtp_host,
            raw_from, raw_to, multipart_msg.as_string(),
            port=self.hs.config.email_smtp_port,
            requireAuthentication=self.hs.config.email_smtp_user is not None,
            username=self.hs.config.email_smtp_user,
            password=self.hs.config.email_smtp_pass,
            requireTransportSecurity=self.hs.config.require_transport_security
        )
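
deduped_ordered_list is used here but not shown. Assuming it is the obvious order-preserving deduplication (keep the first occurrence of each room_id), a sketch would be:

    def deduped_ordered_list(it):
        seen = set()
        out = []
        for item in it:
            if item not in seen:  # keep only the first occurrence
                seen.add(item)
                out.append(item)
        return out

    assert deduped_ordered_list(["!a", "!b", "!a", "!c"]) == ["!a", "!b", "!c"]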
Example No. 7
    def _get_public_room_list(self, limit=None, since_token=None,
                              search_filter=None,
                              network_tuple=EMPTY_THIRD_PARTY_ID,):
        if since_token and since_token != "END":
            since_token = RoomListNextBatch.from_token(since_token)
        else:
            since_token = None

        rooms_to_order_value = {}
        rooms_to_num_joined = {}

        newly_visible = []
        newly_unpublished = []
        if since_token:
            stream_token = since_token.stream_ordering
            current_public_id = yield self.store.get_current_public_room_stream_id()
            public_room_stream_id = since_token.public_room_stream_id
            newly_visible, newly_unpublished = yield self.store.get_public_room_changes(
                public_room_stream_id, current_public_id,
                network_tuple=network_tuple,
            )
        else:
            stream_token = yield self.store.get_room_max_stream_ordering()
            public_room_stream_id = yield self.store.get_current_public_room_stream_id()

        room_ids = yield self.store.get_public_room_ids_at_stream_id(
            public_room_stream_id, network_tuple=network_tuple,
        )

        # We want to return rooms in a particular order: the number of joined
        # users. We then arbitrarily use the room_id as a tie breaker.

        @defer.inlineCallbacks
        def get_order_for_room(room_id):
            # Most of the rooms won't have changed between the since token and
            # now (especially if the since token is "now"). So, we can ask what
            # the current users are in a room (that will hit a cache) and then
            # check if the room has changed since the since token. (We have to
            # do it in that order to avoid races).
            # If things have changed then fall back to getting the current state
            # at the since token.
            joined_users = yield self.store.get_users_in_room(room_id)
            if self.store.has_room_changed_since(room_id, stream_token):
                latest_event_ids = yield self.store.get_forward_extremeties_for_room(
                    room_id, stream_token
                )

                if not latest_event_ids:
                    return

                joined_users = yield self.state_handler.get_current_user_in_room(
                    room_id, latest_event_ids,
                )

            num_joined_users = len(joined_users)
            rooms_to_num_joined[room_id] = num_joined_users

            if num_joined_users == 0:
                return

            # We want larger rooms to be first, hence negating num_joined_users
            rooms_to_order_value[room_id] = (-num_joined_users, room_id)

        logger.info("Getting ordering for %i rooms since %s",
                    len(room_ids), stream_token)
        yield concurrently_execute(get_order_for_room, room_ids, 10)

        sorted_entries = sorted(rooms_to_order_value.items(), key=lambda e: e[1])
        sorted_rooms = [room_id for room_id, _ in sorted_entries]

        # `sorted_rooms` should now be a list of all public room ids that is
        # stable across pagination. Therefore, we can use indices into this
        # list as our pagination tokens.

        # Filter out rooms that we don't want to return
        rooms_to_scan = [
            r for r in sorted_rooms
            if r not in newly_unpublished and rooms_to_num_joined[r] > 0
        ]

        total_room_count = len(rooms_to_scan)

        if since_token:
            # Filter out rooms we've already returned previously
            # `since_token.current_limit` is the index of the last room we
            # sent down, so we exclude it and everything before/after it.
            if since_token.direction_is_forward:
                rooms_to_scan = rooms_to_scan[since_token.current_limit + 1:]
            else:
                rooms_to_scan = rooms_to_scan[:since_token.current_limit]
                rooms_to_scan.reverse()

        logger.info("After sorting and filtering, %i rooms remain",
                    len(rooms_to_scan))

        # _append_room_entry_to_chunk will append to chunk but will stop if
        # len(chunk) > limit
        #
        # Normally we will generate enough results on the first iteration here,
        #  but if there is a search filter, _append_room_entry_to_chunk may
        # filter some results out, in which case we loop again.
        #
        # We don't want to scan over the entire range either as that
        # would potentially waste a lot of work.
        #
        # XXX if there is no limit, we may end up DoSing the server with
        # calls to get_current_state_ids for every single room on the
        # server. Surely we should cap this somehow?
        #
        if limit:
            step = limit + 1
        else:
            # step cannot be zero
            step = len(rooms_to_scan) if len(rooms_to_scan) != 0 else 1

        chunk = []
        for i in range(0, len(rooms_to_scan), step):
            batch = rooms_to_scan[i:i + step]
            logger.info("Processing %i rooms for result", len(batch))
            yield concurrently_execute(
                lambda r: self._append_room_entry_to_chunk(
                    r, rooms_to_num_joined[r],
                    chunk, limit, search_filter
                ),
                batch, 5,
            )
            logger.info("Now %i rooms in result", len(chunk))
            if limit and len(chunk) >= limit + 1:
                break

        chunk.sort(key=lambda e: (-e["num_joined_members"], e["room_id"]))

        # Work out the new limit of the batch for pagination, or None if we
        # know there are no more results that would be returned.
        # i.e., [since_token.current_limit..new_limit] is the batch of rooms
        # we've returned (or the reverse if we paginated backwards)
        # We tried to pull out limit + 1 rooms above, so if we have <= limit
        # then we know there are no more results to return
        new_limit = None
        if chunk and (not limit or len(chunk) > limit):

            if not since_token or since_token.direction_is_forward:
                if limit:
                    chunk = chunk[:limit]
                last_room_id = chunk[-1]["room_id"]
            else:
                if limit:
                    chunk = chunk[-limit:]
                last_room_id = chunk[0]["room_id"]

            new_limit = sorted_rooms.index(last_room_id)

        results = {
            "chunk": chunk,
            "total_room_count_estimate": total_room_count,
        }

        if since_token:
            results["new_rooms"] = bool(newly_visible)

        if not since_token or since_token.direction_is_forward:
            if new_limit is not None:
                results["next_batch"] = RoomListNextBatch(
                    stream_ordering=stream_token,
                    public_room_stream_id=public_room_stream_id,
                    current_limit=new_limit,
                    direction_is_forward=True,
                ).to_token()

            if since_token:
                results["prev_batch"] = since_token.copy_and_replace(
                    direction_is_forward=False,
                    current_limit=since_token.current_limit + 1,
                ).to_token()
        else:
            if new_limit is not None:
                results["prev_batch"] = RoomListNextBatch(
                    stream_ordering=stream_token,
                    public_room_stream_id=public_room_stream_id,
                    current_limit=new_limit,
                    direction_is_forward=False,
                ).to_token()

            if since_token:
                results["next_batch"] = since_token.copy_and_replace(
                    direction_is_forward=True,
                    current_limit=since_token.current_limit - 1,
                ).to_token()

        defer.returnValue(results)
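
The pagination here relies on RoomListNextBatch round-tripping a few fields through an opaque token (to_token/from_token/copy_and_replace). The sketch below shows one plausible shape for such a token, assuming a namedtuple serialised as base64-encoded JSON; the real Synapse encoding may well differ, this only illustrates the round-trip the example depends on.

    import base64
    import json
    from collections import namedtuple

    class RoomListNextBatch(namedtuple("RoomListNextBatch", [
        "stream_ordering", "public_room_stream_id",
        "current_limit", "direction_is_forward",
    ])):
        def to_token(self):
            return base64.urlsafe_b64encode(
                json.dumps(self._asdict()).encode("ascii")
            ).decode("ascii")

        @classmethod
        def from_token(cls, token):
            return cls(**json.loads(base64.urlsafe_b64decode(token)))

        def copy_and_replace(self, **kwargs):
            return self._replace(**kwargs)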
Example No. 8
    def send_notification_mail(
        self, app_id, user_id, email_address, push_actions, reason
    ):
        """Send email regarding a user's room notifications"""
        rooms_in_order = deduped_ordered_list([pa["room_id"] for pa in push_actions])

        notif_events = yield self.store.get_events(
            [pa["event_id"] for pa in push_actions]
        )

        notifs_by_room = {}
        for pa in push_actions:
            notifs_by_room.setdefault(pa["room_id"], []).append(pa)

        # collect the current state for all the rooms in which we have
        # notifications
        state_by_room = {}

        try:
            user_display_name = yield self.store.get_profile_displayname(
                UserID.from_string(user_id).localpart
            )
            if user_display_name is None:
                user_display_name = user_id
        except StoreError:
            user_display_name = user_id

        @defer.inlineCallbacks
        def _fetch_room_state(room_id):
            room_state = yield self.store.get_current_state_ids(room_id)
            state_by_room[room_id] = room_state

        # Run at most 3 of these at once: sync does 10 at a time but email
        # notifs are much less realtime than sync so we can afford to wait a bit.
        yield concurrently_execute(_fetch_room_state, rooms_in_order, 3)

        # actually sort our so-called rooms_in_order list, most recent room first
        rooms_in_order.sort(key=lambda r: -(notifs_by_room[r][-1]["received_ts"] or 0))

        rooms = []

        for r in rooms_in_order:
            roomvars = yield self.get_room_vars(
                r, user_id, notifs_by_room[r], notif_events, state_by_room[r]
            )
            rooms.append(roomvars)

        reason["room_name"] = yield calculate_room_name(
            self.store,
            state_by_room[reason["room_id"]],
            user_id,
            fallback_to_members=True,
        )

        summary_text = yield self.make_summary_text(
            notifs_by_room, state_by_room, notif_events, user_id, reason
        )

        template_vars = {
            "user_display_name": user_display_name,
            "unsubscribe_link": self.make_unsubscribe_link(
                user_id, app_id, email_address
            ),
            "summary_text": summary_text,
            "app_name": self.app_name,
            "rooms": rooms,
            "reason": reason,
        }

        yield self.send_email(
            email_address, "[%s] %s" % (self.app_name, summary_text), template_vars
        )
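
The sort key in both mail examples is dense: rooms are ordered by the received_ts of their latest notification, newest first, with a missing timestamp treated as 0. A tiny demonstration with fabricated data:

    notifs_by_room = {
        "!a": [{"received_ts": 100}, {"received_ts": 300}],
        "!b": [{"received_ts": 200}],
        "!c": [{"received_ts": None}],  # no timestamp sorts last
    }
    rooms_in_order = ["!c", "!b", "!a"]
    rooms_in_order.sort(
        key=lambda r: -(notifs_by_room[r][-1]["received_ts"] or 0)
    )
    assert rooms_in_order == ["!a", "!b", "!c"]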
Example No. 9
    def _handle_incoming_transaction(self, origin, transaction, request_time):
        """ Process an incoming transaction and return the HTTP response

        Args:
            origin (unicode): the server making the request
            transaction (Transaction): incoming transaction
            request_time (int): timestamp that the HTTP request arrived at

        Returns:
            Deferred[(int, object)]: http response code and body
        """
        response = yield self.transaction_actions.have_responded(
            origin, transaction)

        if response:
            logger.debug(
                "[%s] We've already responded to this request",
                transaction.transaction_id,
            )
            defer.returnValue(response)
            return

        logger.debug("[%s] Transaction is new", transaction.transaction_id)

        # Reject if PDU count > 50 or EDU count > 100
        if len(transaction.pdus) > 50 or (hasattr(transaction, "edus")
                                          and len(transaction.edus) > 100):

            logger.info(
                "Transaction PDU or EDU count too large. Returning 400")

            response = {}
            yield self.transaction_actions.set_response(
                origin, transaction, 400, response)
            defer.returnValue((400, response))

        received_pdus_counter.inc(len(transaction.pdus))

        origin_host, _ = parse_server_name(origin)

        pdus_by_room = {}

        for p in transaction.pdus:
            if "unsigned" in p:
                unsigned = p["unsigned"]
                if "age" in unsigned:
                    p["age"] = unsigned["age"]
            if "age" in p:
                p["age_ts"] = request_time - int(p["age"])
                del p["age"]

            # We try and pull out an event ID so that if later checks fail we
            # can log something sensible. We don't mandate an event ID here in
            # case future event formats get rid of the key.
            possible_event_id = p.get("event_id", "<Unknown>")

            # Now we get the room ID so that we can check that we know the
            # version of the room.
            room_id = p.get("room_id")
            if not room_id:
                logger.info(
                    "Ignoring PDU as does not have a room_id. Event ID: %s",
                    possible_event_id,
                )
                continue

            try:
                room_version = yield self.store.get_room_version(room_id)
            except NotFoundError:
                logger.info("Ignoring PDU for unknown room_id: %s", room_id)
                continue

            try:
                format_ver = room_version_to_event_format(room_version)
            except UnsupportedRoomVersionError:
                # this can happen if support for a given room version is
                # withdrawn, in which case we may still receive events for
                # that room.
                logger.info(
                    "Ignoring PDU for room %s with unknown version %s",
                    room_id,
                    room_version,
                )
                continue

            event = event_from_pdu_json(p, format_ver)
            pdus_by_room.setdefault(room_id, []).append(event)

        pdu_results = {}

        # we can process different rooms in parallel (which is useful if they
        # require callouts to other servers to fetch missing events), but
        # impose a limit to avoid going too crazy with ram/cpu.

        @defer.inlineCallbacks
        def process_pdus_for_room(room_id):
            logger.debug("Processing PDUs for %s", room_id)
            try:
                yield self.check_server_matches_acl(origin_host, room_id)
            except AuthError as e:
                logger.warn("Ignoring PDUs for room %s from banned server",
                            room_id)
                for pdu in pdus_by_room[room_id]:
                    event_id = pdu.event_id
                    pdu_results[event_id] = e.error_dict()
                return

            for pdu in pdus_by_room[room_id]:
                event_id = pdu.event_id
                with nested_logging_context(event_id):
                    try:
                        yield self._handle_received_pdu(origin, pdu)
                        pdu_results[event_id] = {}
                    except FederationError as e:
                        logger.warn("Error handling PDU %s: %s", event_id, e)
                        pdu_results[event_id] = {"error": str(e)}
                    except Exception as e:
                        f = failure.Failure()
                        pdu_results[event_id] = {"error": str(e)}
                        logger.error(
                            "Failed to handle PDU %s",
                            event_id,
                            exc_info=(f.type, f.value, f.getTracebackObject()),
                        )

        yield concurrently_execute(process_pdus_for_room, pdus_by_room.keys(),
                                   TRANSACTION_CONCURRENCY_LIMIT)

        if hasattr(transaction, "edus"):
            for edu in (Edu(**x) for x in transaction.edus):
                yield self.received_edu(origin, edu.edu_type, edu.content)

        response = {"pdus": pdu_results}

        logger.debug("Returning: %s", str(response))

        yield self.transaction_actions.set_response(origin, transaction, 200,
                                                    response)
        defer.returnValue((200, response))
Example No. 10
    def _snapshot_all_rooms(self, user_id=None, pagin_config=None,
                            as_client_event=True, include_archived=False):

        memberships = [Membership.INVITE, Membership.JOIN]
        if include_archived:
            memberships.append(Membership.LEAVE)

        room_list = yield self.store.get_rooms_for_user_where_membership_is(
            user_id=user_id, membership_list=memberships
        )

        user = UserID.from_string(user_id)

        rooms_ret = []

        now_token = yield self.hs.get_event_sources().get_current_token()

        presence_stream = self.hs.get_event_sources().sources["presence"]
        pagination_config = PaginationConfig(from_token=now_token)
        presence, _ = yield presence_stream.get_pagination_rows(
            user, pagination_config.get_source_config("presence"), None
        )

        receipt_stream = self.hs.get_event_sources().sources["receipt"]
        receipt, _ = yield receipt_stream.get_pagination_rows(
            user, pagination_config.get_source_config("receipt"), None
        )

        tags_by_room = yield self.store.get_tags_for_user(user_id)

        account_data, account_data_by_room = (
            yield self.store.get_account_data_for_user(user_id)
        )

        public_room_ids = yield self.store.get_public_room_ids()

        limit = pagin_config.limit
        if limit is None:
            limit = 10

        @defer.inlineCallbacks
        def handle_room(event):
            d = {
                "room_id": event.room_id,
                "membership": event.membership,
                "visibility": (
                    "public" if event.room_id in public_room_ids
                    else "private"
                ),
            }

            if event.membership == Membership.INVITE:
                time_now = self.clock.time_msec()
                d["inviter"] = event.sender

                invite_event = yield self.store.get_event(event.event_id)
                d["invite"] = serialize_event(invite_event, time_now, as_client_event)

            rooms_ret.append(d)

            if event.membership not in (Membership.JOIN, Membership.LEAVE):
                return

            try:
                if event.membership == Membership.JOIN:
                    room_end_token = now_token.room_key
                    deferred_room_state = run_in_background(
                        self.state_handler.get_current_state,
                        event.room_id,
                    )
                elif event.membership == Membership.LEAVE:
                    room_end_token = "s%d" % (event.stream_ordering,)
                    deferred_room_state = run_in_background(
                        self.store.get_state_for_events,
                        [event.event_id],
                    )
                    deferred_room_state.addCallback(
                        lambda states: states[event.event_id]
                    )

                (messages, token), current_state = yield make_deferred_yieldable(
                    defer.gatherResults(
                        [
                            run_in_background(
                                self.store.get_recent_events_for_room,
                                event.room_id,
                                limit=limit,
                                end_token=room_end_token,
                            ),
                            deferred_room_state,
                        ]
                    )
                ).addErrback(unwrapFirstError)

                messages = yield filter_events_for_client(
                    self.store, user_id, messages
                )

                start_token = now_token.copy_and_replace("room_key", token)
                end_token = now_token.copy_and_replace("room_key", room_end_token)
                time_now = self.clock.time_msec()

                d["messages"] = {
                    "chunk": [
                        serialize_event(m, time_now, as_client_event)
                        for m in messages
                    ],
                    "start": start_token.to_string(),
                    "end": end_token.to_string(),
                }

                d["state"] = [
                    serialize_event(c, time_now, as_client_event)
                    for c in current_state.values()
                ]

                account_data_events = []
                tags = tags_by_room.get(event.room_id)
                if tags:
                    account_data_events.append({
                        "type": "m.tag",
                        "content": {"tags": tags},
                    })

                account_data = account_data_by_room.get(event.room_id, {})
                for account_data_type, content in account_data.items():
                    account_data_events.append({
                        "type": account_data_type,
                        "content": content,
                    })

                d["account_data"] = account_data_events
            except Exception:
                logger.exception("Failed to get snapshot")

        yield concurrently_execute(handle_room, room_list, 10)

        account_data_events = []
        for account_data_type, content in account_data.items():
            account_data_events.append({
                "type": account_data_type,
                "content": content,
            })

        now = self.clock.time_msec()

        ret = {
            "rooms": rooms_ret,
            "presence": [
                {
                    "type": "m.presence",
                    "content": format_user_presence_state(event, now),
                }
                for event in presence
            ],
            "account_data": account_data_events,
            "receipts": receipt,
            "end": now_token.to_string(),
        }

        defer.returnValue(ret)
Example No. 11
    def _get_public_room_list(self, limit=None, since_token=None,
                              search_filter=None,
                              network_tuple=EMPTY_THIRD_PARTY_ID,
                              from_federation=False,
                              timeout=None,):
        """Generate a public room list.
        Args:
            limit (int|None): Maximum amount of rooms to return.
            since_token (str|None)
            search_filter (dict|None): Dictionary to filter rooms by.
            network_tuple (ThirdPartyInstanceID): Which public list to use.
                This can be (None, None) to indicate the main list, or a particular
                appservice and network id to use an appservice specific one.
                Setting to None returns all public rooms across all lists.
            from_federation (bool): Whether this request originated from a
                federating server or a client. Used for room filtering.
            timeout (int|None): Number of seconds to wait for a response before
                timing out.
        """
        if since_token and since_token != "END":
            since_token = RoomListNextBatch.from_token(since_token)
        else:
            since_token = None

        rooms_to_order_value = {}
        rooms_to_num_joined = {}

        newly_visible = []
        newly_unpublished = []
        if since_token:
            stream_token = since_token.stream_ordering
            current_public_id = yield self.store.get_current_public_room_stream_id()
            public_room_stream_id = since_token.public_room_stream_id
            newly_visible, newly_unpublished = yield self.store.get_public_room_changes(
                public_room_stream_id, current_public_id,
                network_tuple=network_tuple,
            )
        else:
            stream_token = yield self.store.get_room_max_stream_ordering()
            public_room_stream_id = yield self.store.get_current_public_room_stream_id()

        room_ids = yield self.store.get_public_room_ids_at_stream_id(
            public_room_stream_id, network_tuple=network_tuple,
        )

        # We want to return rooms in a particular order: the number of joined
        # users. We then arbitrarily use the room_id as a tie breaker.

        @defer.inlineCallbacks
        def get_order_for_room(room_id):
            # Most of the rooms won't have changed between the since token and
            # now (especially if the since token is "now"). So, we can ask what
            # the current users are in a room (that will hit a cache) and then
            # check if the room has changed since the since token. (We have to
            # do it in that order to avoid races).
            # If things have changed then fall back to getting the current state
            # at the since token.
            joined_users = yield self.store.get_users_in_room(room_id)
            if self.store.has_room_changed_since(room_id, stream_token):
                latest_event_ids = yield self.store.get_forward_extremeties_for_room(
                    room_id, stream_token
                )

                if not latest_event_ids:
                    return

                joined_users = yield self.state_handler.get_current_users_in_room(
                    room_id, latest_event_ids,
                )

            num_joined_users = len(joined_users)
            rooms_to_num_joined[room_id] = num_joined_users

            if num_joined_users == 0:
                return

            # We want larger rooms to be first, hence negating num_joined_users
            rooms_to_order_value[room_id] = (-num_joined_users, room_id)

        logger.info("Getting ordering for %i rooms since %s",
                    len(room_ids), stream_token)
        yield concurrently_execute(get_order_for_room, room_ids, 10)

        sorted_entries = sorted(rooms_to_order_value.items(), key=lambda e: e[1])
        sorted_rooms = [room_id for room_id, _ in sorted_entries]

        # `sorted_rooms` should now be a list of all public room ids that is
        # stable across pagination. Therefore, we can use indices into this
        # list as our pagination tokens.

        # Filter out rooms that we don't want to return
        rooms_to_scan = [
            r for r in sorted_rooms
            if r not in newly_unpublished and rooms_to_num_joined[r] > 0
        ]

        total_room_count = len(rooms_to_scan)

        if since_token:
            # Filter out rooms we've already returned previously
            # `since_token.current_limit` is the index of the last room we
            # sent down, so we exclude it and everything before/after it.
            if since_token.direction_is_forward:
                rooms_to_scan = rooms_to_scan[since_token.current_limit + 1:]
            else:
                rooms_to_scan = rooms_to_scan[:since_token.current_limit]
                rooms_to_scan.reverse()

        logger.info("After sorting and filtering, %i rooms remain",
                    len(rooms_to_scan))

        # _append_room_entry_to_chunk will append to chunk but will stop if
        # len(chunk) > limit
        #
        # Normally we will generate enough results on the first iteration here,
        #  but if there is a search filter, _append_room_entry_to_chunk may
        # filter some results out, in which case we loop again.
        #
        # We don't want to scan over the entire range either as that
        # would potentially waste a lot of work.
        #
        # XXX if there is no limit, we may end up DoSing the server with
        # calls to get_current_state_ids for every single room on the
        # server. Surely we should cap this somehow?
        #
        if limit:
            step = limit + 1
        else:
            # step cannot be zero
            step = len(rooms_to_scan) if len(rooms_to_scan) != 0 else 1

        chunk = []
        for i in range(0, len(rooms_to_scan), step):
            if timeout and self.clock.time() > timeout:
                raise Exception("Timed out searching room directory")

            batch = rooms_to_scan[i:i + step]
            logger.info("Processing %i rooms for result", len(batch))
            yield concurrently_execute(
                lambda r: self._append_room_entry_to_chunk(
                    r, rooms_to_num_joined[r],
                    chunk, limit, search_filter,
                    from_federation=from_federation,
                ),
                batch, 5,
            )
            logger.info("Now %i rooms in result", len(chunk))
            if limit and len(chunk) >= limit + 1:
                break

        chunk.sort(key=lambda e: (-e["num_joined_members"], e["room_id"]))

        # Work out the new limit of the batch for pagination, or None if we
        # know there are no more results that would be returned.
        # i.e., [since_token.current_limit..new_limit] is the batch of rooms
        # we've returned (or the reverse if we paginated backwards)
        # We tried to pull out limit + 1 rooms above, so if we have <= limit
        # then we know there are no more results to return
        new_limit = None
        if chunk and (not limit or len(chunk) > limit):

            if not since_token or since_token.direction_is_forward:
                if limit:
                    chunk = chunk[:limit]
                last_room_id = chunk[-1]["room_id"]
            else:
                if limit:
                    chunk = chunk[-limit:]
                last_room_id = chunk[0]["room_id"]

            new_limit = sorted_rooms.index(last_room_id)

        results = {
            "chunk": chunk,
            "total_room_count_estimate": total_room_count,
        }

        if since_token:
            results["new_rooms"] = bool(newly_visible)

        if not since_token or since_token.direction_is_forward:
            if new_limit is not None:
                results["next_batch"] = RoomListNextBatch(
                    stream_ordering=stream_token,
                    public_room_stream_id=public_room_stream_id,
                    current_limit=new_limit,
                    direction_is_forward=True,
                ).to_token()

            if since_token:
                results["prev_batch"] = since_token.copy_and_replace(
                    direction_is_forward=False,
                    current_limit=since_token.current_limit + 1,
                ).to_token()
        else:
            if new_limit is not None:
                results["prev_batch"] = RoomListNextBatch(
                    stream_ordering=stream_token,
                    public_room_stream_id=public_room_stream_id,
                    current_limit=new_limit,
                    direction_is_forward=False,
                ).to_token()

            if since_token:
                results["next_batch"] = since_token.copy_and_replace(
                    direction_is_forward=True,
                    current_limit=since_token.current_limit - 1,
                ).to_token()

        defer.returnValue(results)
Example No. 12
    def delete_group(self, group_id, requester_user_id):
        """Deletes a group, kicking out all current members.

        Only group admins or server admins can call this request

        Args:
            group_id (str)
            requester_user_id (str)

        Returns:
            Deferred
        """

        yield self.check_group_is_ours(group_id,
                                       requester_user_id,
                                       and_exists=True)

        # Only server admins or group admins can delete groups.

        is_admin = yield self.store.is_user_admin_in_group(
            group_id, requester_user_id)

        if not is_admin:
            is_admin = yield self.auth.is_server_admin(
                UserID.from_string(requester_user_id))

        if not is_admin:
            raise SynapseError(403, "User is not an admin")

        # Before deleting the group, let's kick everyone out of it
        users = yield self.store.get_users_in_group(group_id,
                                                    include_private=True)

        @defer.inlineCallbacks
        def _kick_user_from_group(user_id):
            if self.hs.is_mine_id(user_id):
                groups_local = self.hs.get_groups_local_handler()
                yield groups_local.user_removed_from_group(
                    group_id, user_id, {})
            else:
                yield self.transport_client.remove_user_from_group_notification(
                    get_domain_from_id(user_id), group_id, user_id, {})
                yield self.store.maybe_delete_remote_profile_cache(user_id)

        # We kick users out in the order of:
        #   1. Non-admins
        #   2. Other admins
        #   3. The requester
        #
        # This is so that if the deletion fails for some reason other admins or
        # the requester still has auth to retry.
        non_admins = []
        admins = []
        for u in users:
            if u["user_id"] == requester_user_id:
                continue
            if u["is_admin"]:
                admins.append(u["user_id"])
            else:
                non_admins.append(u["user_id"])

        yield concurrently_execute(_kick_user_from_group, non_admins, 10)
        yield concurrently_execute(_kick_user_from_group, admins, 10)
        yield _kick_user_from_group(requester_user_id)

        yield self.store.delete_group(group_id)
Example No. 13
    def delete_group(self, group_id, requester_user_id):
        """Deletes a group, kicking out all current members.

        Only group admins or server admins can call this request

        Args:
            group_id (str)
            requester_user_id (str)

        Returns:
            Deferred
        """

        yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)

        # Only server admins or group admins can delete groups.

        is_admin = yield self.store.is_user_admin_in_group(group_id, requester_user_id)

        if not is_admin:
            is_admin = yield self.auth.is_server_admin(
                UserID.from_string(requester_user_id)
            )

        if not is_admin:
            raise SynapseError(403, "User is not an admin")

        # Before deleting the group, let's kick everyone out of it
        users = yield self.store.get_users_in_group(group_id, include_private=True)

        @defer.inlineCallbacks
        def _kick_user_from_group(user_id):
            if self.hs.is_mine_id(user_id):
                groups_local = self.hs.get_groups_local_handler()
                yield groups_local.user_removed_from_group(group_id, user_id, {})
            else:
                yield self.transport_client.remove_user_from_group_notification(
                    get_domain_from_id(user_id), group_id, user_id, {}
                )
                yield self.store.maybe_delete_remote_profile_cache(user_id)

        # We kick users out in the order of:
        #   1. Non-admins → non_admins
        #   2. Other admins → admins
        #   3. The requester → ?
        #
        # This is so that if the deletion fails for some reason other admins or
        # the requester still has auth to retry.

        # Maxigas: Refactor to functional paradigm. Note that the requester
        # must be excluded from *both* lists: a requester who is a plain
        # member would otherwise be kicked early, and then again at the end.
        non_admins = [
            u["user_id"] for u in users
            if not u["is_admin"] and u["user_id"] != requester_user_id
        ]
        admins = [
            u["user_id"] for u in users
            if u["is_admin"] and u["user_id"] != requester_user_id
        ]

        # Maxigas: Refactor using the map/filter pattern. Filter the list
        # with an anonymous (lambda) function, then map another lambda over
        # the survivors:
        others = list(filter(lambda u: u["user_id"] != requester_user_id, users))
        non_admins = list(map(lambda u: u["user_id"],
                              filter(lambda u: not u["is_admin"], others)))
        admins = list(map(lambda u: u["user_id"],
                          filter(lambda u: u["is_admin"], others)))

        # Maxigas: The original imperative loop, for comparison. The
        # `continue` is what keeps the requester out of both lists, so it
        # cannot be dropped: a requester check on the admin branch alone
        # would let an admin requester fall through into non_admins.
        non_admins = []
        admins = []

        for u in users:
            if u["user_id"] == requester_user_id:
                continue
            if u["is_admin"]:
                admins.append(u["user_id"])
            else:
                non_admins.append(u["user_id"])
        # Maxigas: Refactor ends here ----

        yield concurrently_execute(_kick_user_from_group, non_admins, 10)
        yield concurrently_execute(_kick_user_from_group, admins, 10)
        yield _kick_user_from_group(requester_user_id)

        yield self.store.delete_group(group_id)
Example No. 14
    def _handle_incoming_transaction(self, origin, transaction, request_time):
        """ Process an incoming transaction and return the HTTP response

        Args:
            origin (unicode): the server making the request
            transaction (Transaction): incoming transaction
            request_time (int): timestamp that the HTTP request arrived at

        Returns:
            Deferred[(int, object)]: http response code and body
        """
        response = yield self.transaction_actions.have_responded(origin, transaction)

        if response:
            logger.debug(
                "[%s] We've already responded to this request",
                transaction.transaction_id
            )
            defer.returnValue(response)
            return

        logger.debug("[%s] Transaction is new", transaction.transaction_id)

        # Reject if PDU count > 50 or EDU count > 100
        if (len(transaction.pdus) > 50
                or (hasattr(transaction, "edus") and len(transaction.edus) > 100)):

            logger.info(
                "Transaction PDU or EDU count too large. Returning 400",
            )

            response = {}
            yield self.transaction_actions.set_response(
                origin,
                transaction,
                400, response
            )
            defer.returnValue((400, response))

        received_pdus_counter.inc(len(transaction.pdus))

        origin_host, _ = parse_server_name(origin)

        pdus_by_room = {}

        for p in transaction.pdus:
            if "unsigned" in p:
                unsigned = p["unsigned"]
                if "age" in unsigned:
                    p["age"] = unsigned["age"]
            if "age" in p:
                p["age_ts"] = request_time - int(p["age"])
                del p["age"]

            # We try and pull out an event ID so that if later checks fail we
            # can log something sensible. We don't mandate an event ID here in
            # case future event formats get rid of the key.
            possible_event_id = p.get("event_id", "<Unknown>")

            # Now we get the room ID so that we can check that we know the
            # version of the room.
            room_id = p.get("room_id")
            if not room_id:
                logger.info(
                    "Ignoring PDU as does not have a room_id. Event ID: %s",
                    possible_event_id,
                )
                continue

            try:
                room_version = yield self.store.get_room_version(room_id)
            except NotFoundError:
                logger.info("Ignoring PDU for unknown room_id: %s", room_id)
                continue

            try:
                format_ver = room_version_to_event_format(room_version)
            except UnsupportedRoomVersionError:
                # this can happen if support for a given room version is
                # withdrawn, in which case we may still receive events for
                # that room.
                logger.info(
                    "Ignoring PDU for room %s with unknown version %s",
                    room_id,
                    room_version,
                )
                continue

            event = event_from_pdu_json(p, format_ver)
            pdus_by_room.setdefault(room_id, []).append(event)

        pdu_results = {}

        # we can process different rooms in parallel (which is useful if they
        # require callouts to other servers to fetch missing events), but
        # impose a limit to avoid going too crazy with ram/cpu.

        @defer.inlineCallbacks
        def process_pdus_for_room(room_id):
            logger.debug("Processing PDUs for %s", room_id)
            try:
                yield self.check_server_matches_acl(origin_host, room_id)
            except AuthError as e:
                logger.warn(
                    "Ignoring PDUs for room %s from banned server", room_id,
                )
                for pdu in pdus_by_room[room_id]:
                    event_id = pdu.event_id
                    pdu_results[event_id] = e.error_dict()
                return

            for pdu in pdus_by_room[room_id]:
                event_id = pdu.event_id
                with nested_logging_context(event_id):
                    try:
                        yield self._handle_received_pdu(origin, pdu)
                        pdu_results[event_id] = {}
                    except FederationError as e:
                        logger.warn("Error handling PDU %s: %s", event_id, e)
                        pdu_results[event_id] = {"error": str(e)}
                    except Exception as e:
                        f = failure.Failure()
                        pdu_results[event_id] = {"error": str(e)}
                        logger.error(
                            "Failed to handle PDU %s",
                            event_id,
                            exc_info=(f.type, f.value, f.getTracebackObject()),
                        )

        yield concurrently_execute(
            process_pdus_for_room, pdus_by_room.keys(),
            TRANSACTION_CONCURRENCY_LIMIT,
        )

        if hasattr(transaction, "edus"):
            for edu in (Edu(**x) for x in transaction.edus):
                yield self.received_edu(
                    origin,
                    edu.edu_type,
                    edu.content
                )

        response = {
            "pdus": pdu_results,
        }

        logger.debug("Returning: %s", str(response))

        yield self.transaction_actions.set_response(
            origin,
            transaction,
            200, response
        )
        defer.returnValue((200, response))
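
The examples in this listing all funnel their fan-out work through concurrently_execute. As a rough mental model, here is a minimal sketch of what such a helper could look like under the Twisted/Deferred conventions used above; this is an illustration under those assumptions, not Synapse's actual implementation:

from twisted.internet import defer

def concurrently_execute(func, args, limit):
    # Run `func` over every item of `args`, keeping at most `limit`
    # invocations in flight at once. Each runner repeatedly pulls the next
    # argument from a shared iterator until it is exhausted.
    it = iter(args)

    @defer.inlineCallbacks
    def _runner():
        try:
            while True:
                yield func(next(it))
        except StopIteration:
            pass

    # Start `limit` runners and wait for all of them to complete.
    return defer.gatherResults(
        [_runner() for _ in range(limit)], consumeErrors=True
    )

In this sketch, concurrently_execute(func, args, limit) resolves only once every argument has been processed, which is why the callers above can simply yield on it.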
Example #15
    @defer.inlineCallbacks
    def delete_group(self, group_id, requester_user_id):
        """Deletes a group, kicking out all current members.

        Only group admins or server admins can make this request.

        Args:
            group_id (str)
            requester_user_id (str)

        Returns:
            Deferred
        """

        yield self.check_group_is_ours(
            group_id, requester_user_id,
            and_exists=True,
        )

        # Only server admins or group admins can delete groups.

        is_admin = yield self.store.is_user_admin_in_group(
            group_id, requester_user_id
        )

        if not is_admin:
            is_admin = yield self.auth.is_server_admin(
                UserID.from_string(requester_user_id),
            )

        if not is_admin:
            raise SynapseError(403, "User is not an admin")

        # Before deleting the group, let's kick everyone out of it
        users = yield self.store.get_users_in_group(
            group_id, include_private=True,
        )

        @defer.inlineCallbacks
        def _kick_user_from_group(user_id):
            if self.hs.is_mine_id(user_id):
                groups_local = self.hs.get_groups_local_handler()
                yield groups_local.user_removed_from_group(group_id, user_id, {})
            else:
                yield self.transport_client.remove_user_from_group_notification(
                    get_domain_from_id(user_id), group_id, user_id, {}
                )
                yield self.store.maybe_delete_remote_profile_cache(user_id)

        # We kick users out in the order of:
        #   1. Non-admins
        #   2. Other admins
        #   3. The requester
        #
        # This is so that if the deletion fails for some reason other admins or
        # the requester still has auth to retry.
        non_admins = []
        admins = []
        for u in users:
            if u["user_id"] == requester_user_id:
                continue
            if u["is_admin"]:
                admins.append(u["user_id"])
            else:
                non_admins.append(u["user_id"])

        yield concurrently_execute(_kick_user_from_group, non_admins, 10)
        yield concurrently_execute(_kick_user_from_group, admins, 10)
        yield _kick_user_from_group(requester_user_id)

        yield self.store.delete_group(group_id)
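
The remote branch of _kick_user_from_group above uses get_domain_from_id to work out which homeserver to notify. Matrix user IDs take the form @localpart:domain, so a minimal sketch of such a helper (an assumption about the utility, not its verbatim source) could be:

def get_domain_from_id(matrix_id):
    # Matrix IDs look like "@localpart:domain"; the server name is
    # everything after the first colon.
    idx = matrix_id.find(":")
    if idx == -1:
        raise ValueError("Invalid Matrix ID: %r" % (matrix_id,))
    return matrix_id[idx + 1:]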
Example #16
    @defer.inlineCallbacks
    def send_notification_mail(self, app_id, user_id, email_address,
                               push_actions, reason):
        """Send an email with a summary of the given push notifications.

        Args:
            app_id (str): the pusher's application ID
            user_id (str): the user the notifications are for
            email_address (str): the address to send the email to
            push_actions (list[dict]): the push actions to summarise
            reason (dict): triggering context, including the 'room_id' the
                email is about
        """
        try:
            from_string = self.hs.config.email_notif_from % {
                "app": self.app_name
            }
        except TypeError:
            from_string = self.hs.config.email_notif_from

        raw_from = email.utils.parseaddr(from_string)[1]
        raw_to = email.utils.parseaddr(email_address)[1]

        if raw_to == '':
            raise RuntimeError("Invalid 'to' address")

        rooms_in_order = deduped_ordered_list(
            [pa['room_id'] for pa in push_actions]
        )

        notif_events = yield self.store.get_events(
            [pa['event_id'] for pa in push_actions]
        )

        notifs_by_room = {}
        for pa in push_actions:
            notifs_by_room.setdefault(pa["room_id"], []).append(pa)

        # collect the current state for all the rooms in which we have
        # notifications
        state_by_room = {}

        try:
            user_display_name = yield self.store.get_profile_displayname(
                UserID.from_string(user_id).localpart
            )
            if user_display_name is None:
                user_display_name = user_id
        except StoreError:
            user_display_name = user_id

        @defer.inlineCallbacks
        def _fetch_room_state(room_id):
            room_state = yield self.store.get_current_state_ids(room_id)
            state_by_room[room_id] = room_state

        # Run at most 3 of these at once: sync does 10 at a time but email
        # notifs are much less realtime than sync so we can afford to wait a bit.
        yield concurrently_execute(_fetch_room_state, rooms_in_order, 3)

        # actually sort our so-called rooms_in_order list, most recent room first
        rooms_in_order.sort(
            key=lambda r: -(notifs_by_room[r][-1]['received_ts'] or 0)
        )

        rooms = []

        for r in rooms_in_order:
            roomvars = yield self.get_room_vars(
                r, user_id, notifs_by_room[r], notif_events, state_by_room[r]
            )
            rooms.append(roomvars)

        reason['room_name'] = yield calculate_room_name(
            self.store, state_by_room[reason['room_id']], user_id,
            fallback_to_members=True
        )

        summary_text = yield self.make_summary_text(
            notifs_by_room, state_by_room, notif_events, user_id, reason
        )

        template_vars = {
            "user_display_name": user_display_name,
            "unsubscribe_link": self.make_unsubscribe_link(
                user_id, app_id, email_address
            ),
            "summary_text": summary_text,
            "app_name": self.app_name,
            "rooms": rooms,
            "reason": reason,
        }

        html_text = self.notif_template_html.render(**template_vars)
        html_part = MIMEText(html_text, "html", "utf8")

        plain_text = self.notif_template_text.render(**template_vars)
        text_part = MIMEText(plain_text, "plain", "utf8")

        multipart_msg = MIMEMultipart('alternative')
        multipart_msg['Subject'] = "[%s] %s" % (self.app_name, summary_text)
        multipart_msg['From'] = from_string
        multipart_msg['To'] = email_address
        multipart_msg['Date'] = email.utils.formatdate()
        multipart_msg['Message-ID'] = email.utils.make_msgid()
        multipart_msg.attach(text_part)
        multipart_msg.attach(html_part)

        logger.info("Sending email push notification to %s" % email_address)

        yield make_deferred_yieldable(self.sendmail(
            self.hs.config.email_smtp_host,
            raw_from, raw_to, multipart_msg.as_string().encode('utf8'),
            reactor=self.hs.get_reactor(),
            port=self.hs.config.email_smtp_port,
            requireAuthentication=self.hs.config.email_smtp_user is not None,
            username=self.hs.config.email_smtp_user,
            password=self.hs.config.email_smtp_pass,
            requireTransportSecurity=self.hs.config.require_transport_security
        ))
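
The deduped_ordered_list helper near the top of this example is assumed to drop duplicate room IDs while keeping first-seen order, so each room appears exactly once before the list is re-sorted by recency. A minimal sketch under that assumption:

def deduped_ordered_list(items):
    # Keep only the first occurrence of each item, preserving the order in
    # which items first appeared.
    seen = set()
    out = []
    for item in items:
        if item not in seen:
            seen.add(item)
            out.append(item)
    return out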