Example #1
def _should_count_as_unread(event: EventBase, context: EventContext) -> bool:
    # Exclude rejected and soft-failed events.
    if context.rejected or event.internal_metadata.is_soft_failed():
        return False

    # Exclude notices.
    if (not event.is_state() and event.type == EventTypes.Message
            and event.content.get("msgtype") == "m.notice"):
        return False

    # Exclude edits.
    relates_to = event.content.get("m.relates_to", {})
    if relates_to.get("rel_type") == RelationTypes.REPLACE:
        return False

    # Mark events that have a non-empty string body as unread.
    body = event.content.get("body")
    if isinstance(body, str) and body:
        return True

    # Mark some state events as unread.
    if event.is_state() and event.type in STATE_EVENT_TYPES_TO_MARK_UNREAD:
        return True

    # Mark encrypted events as unread.
    if not event.is_state() and event.type == EventTypes.Encrypted:
        return True

    return False
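
A minimal, self-contained sketch of the same gating rules, using plain dicts in place of EventBase/EventContext (the helper name and the content values below are made up for illustration):

def looks_unread(content: dict) -> bool:
    # Mirrors the notice / edit / non-empty-body checks above.
    if content.get("msgtype") == "m.notice":
        return False
    if content.get("m.relates_to", {}).get("rel_type") == "m.replace":
        return False
    body = content.get("body")
    return isinstance(body, str) and bool(body)

notice = {"msgtype": "m.notice", "body": "server maintenance at 02:00"}
edit = {"body": "* fixed typo", "m.relates_to": {"rel_type": "m.replace", "event_id": "$orig"}}
plain = {"msgtype": "m.text", "body": "hello"}

assert [looks_unread(c) for c in (notice, edit, plain)] == [False, False, True]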
Example #2
    async def serialize(self, event: EventBase, store: "DataStore") -> dict:
        """Converts self to a type that can be serialized as JSON, and then
        deserialized by `deserialize`

        Args:
            event: The event that this context relates to.
            store: The datastore.

        Returns:
            A dict that can be serialized as JSON.
        """

        # We don't serialize the full state dicts, instead they get pulled out
        # of the DB on the other side. However, the other side can't figure out
        # the prev_state_ids, so if we're a state event we include the event
        # id that we replaced in the state.
        if event.is_state():
            prev_state_ids = await self.get_prev_state_ids()
            prev_state_id = prev_state_ids.get((event.type, event.state_key))
        else:
            prev_state_id = None

        return {
            "prev_state_id": prev_state_id,
            "event_type": event.type,
            "event_state_key": event.state_key if event.is_state() else None,
            "state_group": self._state_group,
            "state_group_before_event": self.state_group_before_event,
            "rejected": self.rejected,
            "prev_group": self.prev_group,
            "delta_ids": _encode_state_dict(self.delta_ids),
            "app_service_id":
            self.app_service.id if self.app_service else None,
        }
Example #3
def _check_size_limits(event: EventBase) -> None:
    if len(event.user_id) > 255:
        raise EventSizeError("'user_id' too large")
    if len(event.room_id) > 255:
        raise EventSizeError("'room_id' too large")
    if event.is_state() and len(event.state_key) > 255:
        raise EventSizeError("'state_key' too large")
    if len(event.type) > 255:
        raise EventSizeError("'type' too large")
    if len(event.event_id) > 255:
        raise EventSizeError("'event_id' too large")
    if len(encode_canonical_json(event.get_pdu_json())) > MAX_PDU_SIZE:
        raise EventSizeError("event too large")
Example #4
    async def check_event_allowed(self, event: EventBase,
                                  state: StateMap[EventBase]):
        if event.is_state() and event.type == EventTypes.Member:
            await self.api.create_and_send_event_into_room({
                "room_id": event.room_id,
                "sender": event.sender,
                "type": "bzh.abolivier.test3",
                "content": {
                    "now": int(time.time())
                },
                "state_key": "",
            })

        return True, None
Example #5
def _check_size_limits(event: EventBase) -> None:
    def too_big(field):
        raise EventSizeError("%s too large" % (field, ))

    if len(event.user_id) > 255:
        too_big("user_id")
    if len(event.room_id) > 255:
        too_big("room_id")
    if event.is_state() and len(event.state_key) > 255:
        too_big("state_key")
    if len(event.type) > 255:
        too_big("type")
    if len(event.event_id) > 255:
        too_big("event_id")
    if len(encode_canonical_json(event.get_pdu_json())) > 65536:
        too_big("event")
Example #6
    def maybe_schedule_expiry(self, event: EventBase):
        """Schedule the expiry of an event if there's not already one scheduled,
        or if the one running is for an event that will expire after the provided
        timestamp.

        This function needs to invalidate the event cache, which is only possible on
        the master process, and therefore needs to be run on there.

        Args:
            event: The event to schedule the expiry of.
        """

        expiry_ts = event.content.get(EventContentFields.SELF_DESTRUCT_AFTER)
        if not isinstance(expiry_ts, int) or event.is_state():
            return

        # _schedule_expiry_for_event won't actually schedule anything if there's already
        # a task scheduled for a timestamp that's sooner than the provided one.
        self._schedule_expiry_for_event(event.event_id, expiry_ts)
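
As an illustration of the "only keep the soonest expiry" rule described in the docstring, here is a hypothetical sketch built on asyncio timers instead of Synapse's clock and cache machinery (ExpiryScheduler is not a Synapse class):

import asyncio
from typing import Optional

class ExpiryScheduler:
    def __init__(self) -> None:
        self._handle: Optional[asyncio.TimerHandle] = None
        self._due_at: Optional[float] = None

    def maybe_schedule(self, event_id: str, delay_s: float) -> None:
        loop = asyncio.get_running_loop()
        due_at = loop.time() + delay_s
        if self._handle is not None and self._due_at is not None and self._due_at <= due_at:
            return  # something expiring sooner (or at the same time) is already scheduled
        if self._handle is not None:
            self._handle.cancel()
        self._handle = loop.call_later(delay_s, self._expire, event_id)
        self._due_at = due_at

    def _expire(self, event_id: str) -> None:
        print("expiring", event_id)
        self._handle = None
        self._due_at = None

async def demo() -> None:
    scheduler = ExpiryScheduler()
    scheduler.maybe_schedule("$later", delay_s=0.2)
    scheduler.maybe_schedule("$sooner", delay_s=0.05)  # replaces the later timer
    await asyncio.sleep(0.3)

asyncio.run(demo())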
Example #7
    async def send_nonmember_event(
        self,
        requester: Requester,
        event: EventBase,
        context: EventContext,
        ratelimit: bool = True,
    ) -> int:
        """
        Persists and notifies local clients and federation of an event.

        Args:
            requester: The requester sending the event.
            event: The event to send.
            context: The context of the event.
            ratelimit: Whether to rate limit this send.

        Returns:
            The stream_id of the persisted event.
        """
        if event.type == EventTypes.Member:
            raise SynapseError(
                500, "Tried to send member event through non-member codepath")

        user = UserID.from_string(event.sender)

        assert self.hs.is_mine(user), "User must be our own: %s" % (user, )

        if event.is_state():
            prev_state = await self.deduplicate_state_event(event, context)
            if prev_state is not None:
                logger.info(
                    "Not bothering to persist state event %s duplicated by %s",
                    event.event_id,
                    prev_state.event_id,
                )
                return prev_state

        return await self.handle_new_client_event(requester=requester,
                                                  event=event,
                                                  context=context,
                                                  ratelimit=ratelimit)
Example #8
    def _validate_retention(self, event: EventBase):
        """Checks that an event that defines the retention policy for a room respects the
        format enforced by the spec.

        Args:
            event: The event to validate.
        """
        if not event.is_state():
            raise SynapseError(code=400, msg="must be a state event")

        min_lifetime = event.content.get("min_lifetime")
        max_lifetime = event.content.get("max_lifetime")

        if min_lifetime is not None:
            if not isinstance(min_lifetime, int):
                raise SynapseError(
                    code=400,
                    msg="'min_lifetime' must be an integer",
                    errcode=Codes.BAD_JSON,
                )

        if max_lifetime is not None:
            if not isinstance(max_lifetime, int):
                raise SynapseError(
                    code=400,
                    msg="'max_lifetime' must be an integer",
                    errcode=Codes.BAD_JSON,
                )

        if (
            min_lifetime is not None
            and max_lifetime is not None
            and min_lifetime > max_lifetime
        ):
            raise SynapseError(
                code=400,
                msg="'min_lifetime' can't be greater than 'max_lifetime",
                errcode=Codes.BAD_JSON,
            )
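
For reference, a retention policy content that passes these checks might look like the following (values are illustrative; per MSC1763 the lifetimes are expressed in milliseconds):

retention_content = {
    "min_lifetime": 24 * 60 * 60 * 1000,       # 1 day
    "max_lifetime": 30 * 24 * 60 * 60 * 1000,  # 30 days
}

assert isinstance(retention_content["min_lifetime"], int)
assert isinstance(retention_content["max_lifetime"], int)
assert retention_content["min_lifetime"] <= retention_content["max_lifetime"]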
Example #9
    async def action_for_event_by_user(self, event: EventBase,
                                       context: EventContext) -> None:
        """Given an event and context, evaluate the push rules, check if the message
        should increment the unread count, and insert the results into the
        event_push_actions_staging table.
        """
        count_as_unread = _should_count_as_unread(event, context)

        rules_by_user = await self._get_rules_for_event(event, context)
        actions_by_user: Dict[str, List[Union[dict, str]]] = {}

        room_members = await self.store.get_joined_users_from_context(
            event, context)

        (
            power_levels,
            sender_power_level,
        ) = await self._get_power_levels_and_sender_level(event, context)

        evaluator = PushRuleEvaluatorForEvent(event, len(room_members),
                                              sender_power_level, power_levels)

        condition_cache: Dict[str, bool] = {}

        # If the event is not a state event check if any users ignore the sender.
        if not event.is_state():
            ignorers = await self.store.ignored_by(event.sender)
        else:
            ignorers = set()

        for uid, rules in rules_by_user.items():
            if event.sender == uid:
                continue

            if uid in ignorers:
                continue

            display_name = None
            profile_info = room_members.get(uid)
            if profile_info:
                display_name = profile_info.display_name

            if not display_name:
                # Handle the case where we are pushing a membership event to
                # that user, as they might not be already joined.
                if event.type == EventTypes.Member and event.state_key == uid:
                    display_name = event.content.get("displayname", None)

            if count_as_unread:
                # Add an element for the current user if the event needs to be marked as
                # unread, so that add_push_actions_to_staging iterates over it.
                # If the event shouldn't be marked as unread but should notify the
                # current user, it'll be added to the dict later.
                actions_by_user[uid] = []

            for rule in rules:
                if "enabled" in rule and not rule["enabled"]:
                    continue

                matches = _condition_checker(evaluator, rule["conditions"],
                                             uid, display_name,
                                             condition_cache)
                if matches:
                    actions = [
                        x for x in rule["actions"] if x != "dont_notify"
                    ]
                    if actions and "notify" in actions:
                        # Push rules say we should notify the user of this event
                        actions_by_user[uid] = actions
                    break

        # Mark in the DB staging area the push actions for users who should be
        # notified for this event. (This will then get handled when we persist
        # the event)
        await self.store.add_push_actions_to_staging(
            event.event_id,
            actions_by_user,
            count_as_unread,
        )
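
The condition_cache above simply memoizes per-event condition results so the same condition isn't re-evaluated for every user; a stripped-down sketch of that pattern (names are illustrative, not Synapse's):

from typing import Callable, Dict

def cached_eval(cache: Dict[str, bool], cache_key: str, evaluate: Callable[[], bool]) -> bool:
    # Evaluate each condition at most once per event, then reuse the result.
    if cache_key not in cache:
        cache[cache_key] = evaluate()
    return cache[cache_key]

condition_cache: Dict[str, bool] = {}
assert cached_eval(condition_cache, "room_member_count>=2", lambda: True) is True
assert cached_eval(condition_cache, "room_member_count>=2", lambda: False) is True  # cached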
Example #10
    async def _get_bundled_aggregation_for_event(
        self, event: EventBase, user_id: str
    ) -> Optional[BundledAggregations]:
        """Generate bundled aggregations for an event.

        Note that this does not use a cache, but depends on cached methods.

        Args:
            event: The event to calculate bundled aggregations for.
            user_id: The user requesting the bundled aggregations.

        Returns:
            The bundled aggregations for an event, if bundled aggregations are
            enabled and the event can have bundled aggregations.
        """
        # State events and redacted events do not get bundled aggregations.
        if event.is_state() or event.internal_metadata.is_redacted():
            return None

        # Do not bundle aggregations for an event which represents an edit or an
        # annotation. It does not make sense for them to have related events.
        relates_to = event.content.get("m.relates_to")
        if isinstance(relates_to, (dict, frozendict)):
            relation_type = relates_to.get("rel_type")
            if relation_type in (RelationTypes.ANNOTATION, RelationTypes.REPLACE):
                return None

        event_id = event.event_id
        room_id = event.room_id

        # The bundled aggregations to include, a mapping of relation type to a
        # type-specific value. Some types include the direct return type here
        # while others need more processing during serialization.
        aggregations = BundledAggregations()

        annotations = await self.get_aggregation_groups_for_event(event_id, room_id)
        if annotations.chunk:
            aggregations.annotations = annotations.to_dict()

        references = await self.get_relations_for_event(
            event_id, room_id, RelationTypes.REFERENCE, direction="f"
        )
        if references.chunk:
            aggregations.references = references.to_dict()

        edit = None
        if event.type == EventTypes.Message:
            edit = await self.get_applicable_edit(event_id, room_id)

        if edit:
            aggregations.replace = edit

        # If this event is the start of a thread, include a summary of the replies.
        if self._msc3440_enabled:
            thread_count, latest_thread_event = await self.get_thread_summary(
                event_id, room_id
            )
            participated = await self.get_thread_participated(
                event_id, room_id, user_id
            )
            if latest_thread_event:
                aggregations.thread = _ThreadAggregation(
                    latest_event=latest_thread_event,
                    count=thread_count,
                    current_user_participated=participated,
                )

        # Store the bundled aggregations in the event metadata for later use.
        return aggregations
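
For context, the rel_type values checked above come from m.relates_to payloads shaped roughly like these (event IDs and the reaction key are made up):

edit_relation = {"rel_type": "m.replace", "event_id": "$original_event"}
annotation_relation = {"rel_type": "m.annotation", "event_id": "$target_event", "key": "👍"}
reference_relation = {"rel_type": "m.reference", "event_id": "$target_event"}

def skips_bundling(relates_to: dict) -> bool:
    # Mirrors the check above: edits and annotations don't get bundled aggregations.
    return relates_to.get("rel_type") in ("m.annotation", "m.replace")

assert skips_bundling(edit_relation)
assert skips_bundling(annotation_relation)
assert not skips_bundling(reference_relation)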
Example #11
def check(
    room_version_obj: RoomVersion,
    event: EventBase,
    auth_events: StateMap[EventBase],
    do_sig_check: bool = True,
    do_size_check: bool = True,
) -> None:
    """ Checks if this event is correctly authed.

    Args:
        room_version_obj: the version of the room
        event: the event being checked.
        auth_events: the existing room state, keyed by (event type, state key).

    Raises:
        AuthError if the checks fail

    Returns:
        None if the auth checks pass.
    """
    assert isinstance(auth_events, dict)

    if do_size_check:
        _check_size_limits(event)

    if not hasattr(event, "room_id"):
        raise AuthError(500, "Event has no room_id: %s" % event)

    room_id = event.room_id

    # We need to ensure that the auth events are actually for the same room, to
    # stop people from using powers they've been granted in other rooms for
    # example.
    for auth_event in auth_events.values():
        if auth_event.room_id != room_id:
            raise AuthError(
                403,
                "During auth for event %s in room %s, found event %s in the state "
                "which is in room %s" %
                (event.event_id, room_id, auth_event.event_id,
                 auth_event.room_id),
            )

    if do_sig_check:
        sender_domain = get_domain_from_id(event.sender)

        is_invite_via_3pid = (event.type == EventTypes.Member
                              and event.membership == Membership.INVITE
                              and "third_party_invite" in event.content)

        # Check the sender's domain has signed the event
        if not event.signatures.get(sender_domain):
            # We allow invites via 3pid to have a sender from a different
            # HS, as the sender must match the sender of the original
            # 3pid invite. This is checked further down with the
            # other dedicated membership checks.
            if not is_invite_via_3pid:
                raise AuthError(403, "Event not signed by sender's server")

        if event.format_version in (EventFormatVersions.V1, ):
            # Only older room versions have event IDs to check.
            event_id_domain = get_domain_from_id(event.event_id)

            # Check the origin domain has signed the event
            if not event.signatures.get(event_id_domain):
                raise AuthError(403, "Event not signed by sending server")

    # Implementation of https://matrix.org/docs/spec/rooms/v1#authorization-rules
    #
    # 1. If type is m.room.create:
    if event.type == EventTypes.Create:
        # 1b. If the domain of the room_id does not match the domain of the sender,
        # reject.
        sender_domain = get_domain_from_id(event.sender)
        room_id_domain = get_domain_from_id(event.room_id)
        if room_id_domain != sender_domain:
            raise AuthError(
                403, "Creation event's room_id domain does not match sender's")

        # 1c. If content.room_version is present and is not a recognised version, reject
        room_version_prop = event.content.get("room_version", "1")
        if room_version_prop not in KNOWN_ROOM_VERSIONS:
            raise AuthError(
                403,
                "room appears to have unsupported version %s" %
                (room_version_prop, ),
            )

        logger.debug("Allowing! %s", event)
        return

    # 3. If event does not have a m.room.create in its auth_events, reject.
    creation_event = auth_events.get((EventTypes.Create, ""), None)
    if not creation_event:
        raise AuthError(403, "No create event in auth events")

    # additional check for m.federate
    creating_domain = get_domain_from_id(event.room_id)
    originating_domain = get_domain_from_id(event.sender)
    if creating_domain != originating_domain:
        if not _can_federate(event, auth_events):
            raise AuthError(403, "This room has been marked as unfederatable.")

    # 4. If type is m.room.aliases
    if event.type == EventTypes.Aliases and room_version_obj.special_case_aliases_auth:
        # 4a. If event has no state_key, reject
        if not event.is_state():
            raise AuthError(403, "Alias event must be a state event")
        if not event.state_key:
            raise AuthError(403, "Alias event must have non-empty state_key")

        # 4b. If sender's domain doesn't matches [sic] state_key, reject
        sender_domain = get_domain_from_id(event.sender)
        if event.state_key != sender_domain:
            raise AuthError(
                403, "Alias event's state_key does not match sender's domain")

        # 4c. Otherwise, allow.
        logger.debug("Allowing! %s", event)
        return

    if logger.isEnabledFor(logging.DEBUG):
        logger.debug("Auth events: %s",
                     [a.event_id for a in auth_events.values()])

    if event.type == EventTypes.Member:
        _is_membership_change_allowed(event, auth_events)
        logger.debug("Allowing! %s", event)
        return

    _check_event_sender_in_room(event, auth_events)

    # Special case to allow m.room.third_party_invite events wherever
    # a user is allowed to issue invites.  Fixes
    # https://github.com/vector-im/vector-web/issues/1208 hopefully
    if event.type == EventTypes.ThirdPartyInvite:
        user_level = get_user_power_level(event.user_id, auth_events)
        invite_level = _get_named_level(auth_events, "invite", 0)

        if user_level < invite_level:
            raise AuthError(403, "You don't have permission to invite users")
        else:
            logger.debug("Allowing! %s", event)
            return

    _can_send_event(event, auth_events)

    if event.type == EventTypes.PowerLevels:
        _check_power_levels(room_version_obj, event, auth_events)

    if event.type == EventTypes.Redaction:
        check_redaction(room_version_obj, event, auth_events)

    logger.debug("Allowing! %s", event)
Example #12
def check_auth_rules_for_event(
    room_version_obj: RoomVersion, event: EventBase, auth_events: Iterable[EventBase]
) -> None:
    """Check that an event complies with the auth rules

    Checks whether an event passes the auth rules with a given set of state events

    Assumes that we have already checked that the event is the right shape (it has
    enough signatures, has a room ID, etc). In other words:

     - it's fine for use in state resolution, when we have already decided whether to
       accept the event or not, and are now trying to decide whether it should make it
       into the room state

     - when we're doing the initial event auth, it is only suitable in combination with
       a bunch of other tests.

    Args:
        room_version_obj: the version of the room
        event: the event being checked.
        auth_events: the room state to check the events against.

    Raises:
        AuthError if the checks fail
    """
    # We need to ensure that the auth events are actually for the same room, to
    # stop people from using powers they've been granted in other rooms for
    # example.
    #
    # Arguably we don't need to do this when we're just doing state res, as presumably
    # the state res algorithm isn't silly enough to give us events from different rooms.
    # Still, it's easier to do it anyway.
    room_id = event.room_id
    for auth_event in auth_events:
        if auth_event.room_id != room_id:
            raise AuthError(
                403,
                "During auth for event %s in room %s, found event %s in the state "
                "which is in room %s"
                % (event.event_id, room_id, auth_event.event_id, auth_event.room_id),
            )
        if auth_event.rejected_reason:
            raise AuthError(
                403,
                "During auth for event %s: found rejected event %s in the state"
                % (event.event_id, auth_event.event_id),
            )

    # Implementation of https://matrix.org/docs/spec/rooms/v1#authorization-rules
    #
    # 1. If type is m.room.create:
    if event.type == EventTypes.Create:
        # 1b. If the domain of the room_id does not match the domain of the sender,
        # reject.
        sender_domain = get_domain_from_id(event.sender)
        room_id_domain = get_domain_from_id(event.room_id)
        if room_id_domain != sender_domain:
            raise AuthError(
                403, "Creation event's room_id domain does not match sender's"
            )

        # 1c. If content.room_version is present and is not a recognised version, reject
        room_version_prop = event.content.get("room_version", "1")
        if room_version_prop not in KNOWN_ROOM_VERSIONS:
            raise AuthError(
                403,
                "room appears to have unsupported version %s" % (room_version_prop,),
            )

        logger.debug("Allowing! %s", event)
        return

    auth_dict = {(e.type, e.state_key): e for e in auth_events}

    # 3. If event does not have a m.room.create in its auth_events, reject.
    creation_event = auth_dict.get((EventTypes.Create, ""), None)
    if not creation_event:
        raise AuthError(403, "No create event in auth events")

    # additional check for m.federate
    creating_domain = get_domain_from_id(event.room_id)
    originating_domain = get_domain_from_id(event.sender)
    if creating_domain != originating_domain:
        if not _can_federate(event, auth_dict):
            raise AuthError(403, "This room has been marked as unfederatable.")

    # 4. If type is m.room.aliases
    if event.type == EventTypes.Aliases and room_version_obj.special_case_aliases_auth:
        # 4a. If event has no state_key, reject
        if not event.is_state():
            raise AuthError(403, "Alias event must be a state event")
        if not event.state_key:
            raise AuthError(403, "Alias event must have non-empty state_key")

        # 4b. If sender's domain doesn't matches [sic] state_key, reject
        sender_domain = get_domain_from_id(event.sender)
        if event.state_key != sender_domain:
            raise AuthError(
                403, "Alias event's state_key does not match sender's domain"
            )

        # 4c. Otherwise, allow.
        logger.debug("Allowing! %s", event)
        return

    # 5. If type is m.room.membership
    if event.type == EventTypes.Member:
        _is_membership_change_allowed(room_version_obj, event, auth_dict)
        logger.debug("Allowing! %s", event)
        return

    _check_event_sender_in_room(event, auth_dict)

    # Special case to allow m.room.third_party_invite events wherever
    # a user is allowed to issue invites.  Fixes
    # https://github.com/vector-im/vector-web/issues/1208 hopefully
    if event.type == EventTypes.ThirdPartyInvite:
        user_level = get_user_power_level(event.user_id, auth_dict)
        invite_level = get_named_level(auth_dict, "invite", 0)

        if user_level < invite_level:
            raise AuthError(403, "You don't have permission to invite users")
        else:
            logger.debug("Allowing! %s", event)
            return

    _can_send_event(event, auth_dict)

    if event.type == EventTypes.PowerLevels:
        _check_power_levels(room_version_obj, event, auth_dict)

    if event.type == EventTypes.Redaction:
        check_redaction(room_version_obj, event, auth_dict)

    if (
        event.type == EventTypes.MSC2716_INSERTION
        or event.type == EventTypes.MSC2716_BATCH
        or event.type == EventTypes.MSC2716_MARKER
    ):
        check_historical(room_version_obj, event, auth_dict)

    logger.debug("Allowing! %s", event)
Example #13
    async def compute_event_context(
        self,
        event: EventBase,
        state_ids_before_event: Optional[StateMap[str]] = None,
        partial_state: bool = False,
    ) -> EventContext:
        """Build an EventContext structure for a non-outlier event.

        (for an outlier, call EventContext.for_outlier directly)

        This works out what the current state should be for the event, and
        generates a new state group if necessary.

        Args:
            event:
            state_ids_before_event: The event ids of the state before the event if
                it can't be calculated from existing events. This is normally
                only specified when receiving an event from federation where we
                don't have the prev events, e.g. when backfilling.
            partial_state: True if `state_ids_before_event` is partial and omits
                non-critical membership events
        Returns:
            The event context.
        """

        assert not event.internal_metadata.is_outlier()

        #
        # first of all, figure out the state before the event, unless we
        # already have it.
        #
        if state_ids_before_event:
            # if we're given the state before the event, then we use that
            state_group_before_event = None
            state_group_before_event_prev_group = None
            deltas_to_state_group_before_event = None
            entry = None

        else:
            # otherwise, we'll need to resolve the state across the prev_events.

            # partial_state should not be set explicitly in this case:
            # we work it out dynamically
            assert not partial_state

            # if any of the prev-events have partial state, so do we.
            # (This is slightly racy - the prev-events might get fixed up before we use
            # their states - but I don't think that really matters; it just means we
            # might redundantly recalculate the state for this event later.)
            prev_event_ids = event.prev_event_ids()
            incomplete_prev_events = await self.store.get_partial_state_events(
                prev_event_ids
            )
            if any(incomplete_prev_events.values()):
                logger.debug(
                    "New/incoming event %s refers to prev_events %s with partial state",
                    event.event_id,
                    [k for (k, v) in incomplete_prev_events.items() if v],
                )
                partial_state = True

            logger.debug("calling resolve_state_groups from compute_event_context")
            entry = await self.resolve_state_groups_for_events(
                event.room_id, event.prev_event_ids()
            )

            state_ids_before_event = entry.state
            state_group_before_event = entry.state_group
            state_group_before_event_prev_group = entry.prev_group
            deltas_to_state_group_before_event = entry.delta_ids

        #
        # make sure that we have a state group at that point. If it's not a state event,
        # that will be the state group for the new event. If it *is* a state event,
        # it might get rejected (in which case we'll need to persist it with the
        # previous state group)
        #

        if not state_group_before_event:
            state_group_before_event = (
                await self._state_storage_controller.store_state_group(
                    event.event_id,
                    event.room_id,
                    prev_group=state_group_before_event_prev_group,
                    delta_ids=deltas_to_state_group_before_event,
                    current_state_ids=state_ids_before_event,
                )
            )

            # Assign the new state group to the cached state entry.
            #
            # Note that this can race in that we could generate multiple state
            # groups for the same state entry, but that is just inefficient
            # rather than dangerous.
            if entry and entry.state_group is None:
                entry.state_group = state_group_before_event

        #
        # now if it's not a state event, we're done
        #

        if not event.is_state():
            return EventContext.with_state(
                storage=self._storage_controllers,
                state_group_before_event=state_group_before_event,
                state_group=state_group_before_event,
                state_delta_due_to_event={},
                prev_group=state_group_before_event_prev_group,
                delta_ids=deltas_to_state_group_before_event,
                partial_state=partial_state,
            )

        #
        # otherwise, we'll need to create a new state group for after the event
        #

        key = (event.type, event.state_key)
        if key in state_ids_before_event:
            replaces = state_ids_before_event[key]
            if replaces != event.event_id:
                event.unsigned["replaces_state"] = replaces

        state_ids_after_event = dict(state_ids_before_event)
        state_ids_after_event[key] = event.event_id
        delta_ids = {key: event.event_id}

        state_group_after_event = (
            await self._state_storage_controller.store_state_group(
                event.event_id,
                event.room_id,
                prev_group=state_group_before_event,
                delta_ids=delta_ids,
                current_state_ids=state_ids_after_event,
            )
        )

        return EventContext.with_state(
            storage=self._storage_controllers,
            state_group=state_group_after_event,
            state_group_before_event=state_group_before_event,
            state_delta_due_to_event=delta_ids,
            prev_group=state_group_before_event,
            delta_ids=delta_ids,
            partial_state=partial_state,
        )
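
The tail end of compute_event_context reduces to a small state-map update; here is a self-contained sketch of that step with made-up event IDs:

# State before the event, keyed by (event type, state key).
state_ids_before_event = {
    ("m.room.create", ""): "$create",
    ("m.room.name", ""): "$old_name",
}

# A state event replacing the room name.
key = ("m.room.name", "")
new_event_id = "$new_name"

replaces = state_ids_before_event.get(key)      # "$old_name", recorded as unsigned.replaces_state
state_ids_after_event = dict(state_ids_before_event)
state_ids_after_event[key] = new_event_id
delta_ids = {key: new_event_id}

assert replaces == "$old_name"
assert state_ids_after_event[("m.room.create", "")] == "$create"
assert delta_ids == {("m.room.name", ""): "$new_name"}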
Example #14
    def compute_event_context(self,
                              event: EventBase,
                              old_state: Optional[Iterable[EventBase]] = None):
        """Build an EventContext structure for the event.

        This works out what the current state should be for the event, and
        generates a new state group if necessary.

        Args:
            event:
            old_state: The state at the event if it can't be
                calculated from existing events. This is normally only specified
                when receiving an event from federation where we don't have the
                prev events, e.g. when backfilling.
        Returns:
            synapse.events.snapshot.EventContext:
        """

        if event.internal_metadata.is_outlier():
            # If this is an outlier, then we know it shouldn't have any current
            # state. Certainly store.get_current_state won't return any, and
            # persisting the event won't store the state group.

            # FIXME: why do we populate current_state_ids? I thought the point was
            # that we weren't supposed to have any state for outliers?
            if old_state:
                prev_state_ids = {(s.type, s.state_key): s.event_id
                                  for s in old_state}
                if event.is_state():
                    current_state_ids = dict(prev_state_ids)
                    key = (event.type, event.state_key)
                    current_state_ids[key] = event.event_id
                else:
                    current_state_ids = prev_state_ids
            else:
                current_state_ids = {}
                prev_state_ids = {}

            # We don't store state for outliers, so we don't generate a state
            # group for it.
            context = EventContext.with_state(
                state_group=None,
                state_group_before_event=None,
                current_state_ids=current_state_ids,
                prev_state_ids=prev_state_ids,
            )

            return context

        #
        # first of all, figure out the state before the event
        #

        if old_state:
            # if we're given the state before the event, then we use that
            state_ids_before_event = {(s.type, s.state_key): s.event_id
                                      for s in old_state}
            state_group_before_event = None
            state_group_before_event_prev_group = None
            deltas_to_state_group_before_event = None

        else:
            # otherwise, we'll need to resolve the state across the prev_events.
            logger.debug(
                "calling resolve_state_groups from compute_event_context")

            entry = yield self.resolve_state_groups_for_events(
                event.room_id, event.prev_event_ids())

            state_ids_before_event = entry.state
            state_group_before_event = entry.state_group
            state_group_before_event_prev_group = entry.prev_group
            deltas_to_state_group_before_event = entry.delta_ids

        #
        # make sure that we have a state group at that point. If it's not a state event,
        # that will be the state group for the new event. If it *is* a state event,
        # it might get rejected (in which case we'll need to persist it with the
        # previous state group)
        #

        if not state_group_before_event:
            state_group_before_event = yield self.state_store.store_state_group(
                event.event_id,
                event.room_id,
                prev_group=state_group_before_event_prev_group,
                delta_ids=deltas_to_state_group_before_event,
                current_state_ids=state_ids_before_event,
            )

            # XXX: can we update the state cache entry for the new state group? or
            # could we set a flag on resolve_state_groups_for_events to tell it to
            # always make a state group?

        #
        # now if it's not a state event, we're done
        #

        if not event.is_state():
            return EventContext.with_state(
                state_group_before_event=state_group_before_event,
                state_group=state_group_before_event,
                current_state_ids=state_ids_before_event,
                prev_state_ids=state_ids_before_event,
                prev_group=state_group_before_event_prev_group,
                delta_ids=deltas_to_state_group_before_event,
            )

        #
        # otherwise, we'll need to create a new state group for after the event
        #

        key = (event.type, event.state_key)
        if key in state_ids_before_event:
            replaces = state_ids_before_event[key]
            if replaces != event.event_id:
                event.unsigned["replaces_state"] = replaces

        state_ids_after_event = dict(state_ids_before_event)
        state_ids_after_event[key] = event.event_id
        delta_ids = {key: event.event_id}

        state_group_after_event = yield self.state_store.store_state_group(
            event.event_id,
            event.room_id,
            prev_group=state_group_before_event,
            delta_ids=delta_ids,
            current_state_ids=state_ids_after_event,
        )

        return EventContext.with_state(
            state_group=state_group_after_event,
            state_group_before_event=state_group_before_event,
            current_state_ids=state_ids_after_event,
            prev_state_ids=state_ids_before_event,
            prev_group=state_group_before_event,
            delta_ids=delta_ids,
        )
Example #15
    async def compute_event_context(
            self,
            event: EventBase,
            old_state: Optional[Iterable[EventBase]] = None) -> EventContext:
        """Build an EventContext structure for a non-outlier event.

        (for an outlier, call EventContext.for_outlier directly)

        This works out what the current state should be for the event, and
        generates a new state group if necessary.

        Args:
            event:
            old_state: The state at the event if it can't be
                calculated from existing events. This is normally only specified
                when receiving an event from federation where we don't have the
                prev events, e.g. when backfilling.
        Returns:
            The event context.
        """

        assert not event.internal_metadata.is_outlier()

        #
        # first of all, figure out the state before the event
        #

        if old_state:
            # if we're given the state before the event, then we use that
            state_ids_before_event: StateMap[str] = {
                (s.type, s.state_key): s.event_id for s in old_state
            }
            state_group_before_event = None
            state_group_before_event_prev_group = None
            deltas_to_state_group_before_event = None
            entry = None

        else:
            # otherwise, we'll need to resolve the state across the prev_events.
            logger.debug(
                "calling resolve_state_groups from compute_event_context")

            entry = await self.resolve_state_groups_for_events(
                event.room_id, event.prev_event_ids())

            state_ids_before_event = entry.state
            state_group_before_event = entry.state_group
            state_group_before_event_prev_group = entry.prev_group
            deltas_to_state_group_before_event = entry.delta_ids

        #
        # make sure that we have a state group at that point. If it's not a state event,
        # that will be the state group for the new event. If it *is* a state event,
        # it might get rejected (in which case we'll need to persist it with the
        # previous state group)
        #

        if not state_group_before_event:
            state_group_before_event = await self.state_store.store_state_group(
                event.event_id,
                event.room_id,
                prev_group=state_group_before_event_prev_group,
                delta_ids=deltas_to_state_group_before_event,
                current_state_ids=state_ids_before_event,
            )

            # Assign the new state group to the cached state entry.
            #
            # Note that this can race in that we could generate multiple state
            # groups for the same state entry, but that is just inefficient
            # rather than dangerous.
            if entry and entry.state_group is None:
                entry.state_group = state_group_before_event

        #
        # now if it's not a state event, we're done
        #

        if not event.is_state():
            return EventContext.with_state(
                state_group_before_event=state_group_before_event,
                state_group=state_group_before_event,
                current_state_ids=state_ids_before_event,
                prev_state_ids=state_ids_before_event,
                prev_group=state_group_before_event_prev_group,
                delta_ids=deltas_to_state_group_before_event,
            )

        #
        # otherwise, we'll need to create a new state group for after the event
        #

        key = (event.type, event.state_key)
        if key in state_ids_before_event:
            replaces = state_ids_before_event[key]
            if replaces != event.event_id:
                event.unsigned["replaces_state"] = replaces

        state_ids_after_event = dict(state_ids_before_event)
        state_ids_after_event[key] = event.event_id
        delta_ids = {key: event.event_id}

        state_group_after_event = await self.state_store.store_state_group(
            event.event_id,
            event.room_id,
            prev_group=state_group_before_event,
            delta_ids=delta_ids,
            current_state_ids=state_ids_after_event,
        )

        return EventContext.with_state(
            state_group=state_group_after_event,
            state_group_before_event=state_group_before_event,
            current_state_ids=state_ids_after_event,
            prev_state_ids=state_ids_before_event,
            prev_group=state_group_before_event,
            delta_ids=delta_ids,
        )
Example #16
    async def handle_new_client_event(
        self,
        requester: Requester,
        event: EventBase,
        context: EventContext,
        ratelimit: bool = True,
        extra_users: List[UserID] = [],
    ) -> int:
        """Processes a new event. This includes checking auth, persisting it,
        notifying users, sending to remote servers, etc.

        If called from a worker will hit out to the master process for final
        processing.

        Args:
            requester: The requester sending the event.
            event: The event to persist and notify about.
            context: The event context.
            ratelimit: Whether to apply rate limiting to this send.
            extra_users: Any extra users to notify about the event.

        Returns:
            The stream_id of the persisted event.
        """

        if event.is_state() and (event.type, event.state_key) == (
                EventTypes.Create,
                "",
        ):
            room_version = event.content.get("room_version",
                                             RoomVersions.V1.identifier)
        else:
            room_version = await self.store.get_room_version_id(event.room_id)

        event_allowed = await self.third_party_event_rules.check_event_allowed(
            event, context)
        if not event_allowed:
            raise SynapseError(403,
                               "This event is not allowed in this context",
                               Codes.FORBIDDEN)

        if event.internal_metadata.is_out_of_band_membership():
            # the only sort of out-of-band-membership events we expect to see here
            # are invite rejections we have generated ourselves.
            assert event.type == EventTypes.Member
            assert event.content["membership"] == Membership.LEAVE
        else:
            try:
                await self.auth.check_from_context(room_version, event,
                                                   context)
            except AuthError as err:
                logger.warning("Denying new event %r because %s", event, err)
                raise err

        # Ensure that we can round trip before trying to persist in db
        try:
            dump = frozendict_json_encoder.encode(event.content)
            json.loads(dump)
        except Exception:
            logger.exception("Failed to encode content: %r", event.content)
            raise

        await self.action_generator.handle_push_actions_for_event(
            event, context)

        try:
            # If we're a worker we need to hit out to the master.
            if not self._is_event_writer:
                result = await self.send_event(
                    instance_name=self.config.worker.writers.events,
                    event_id=event.event_id,
                    store=self.store,
                    requester=requester,
                    event=event,
                    context=context,
                    ratelimit=ratelimit,
                    extra_users=extra_users,
                )
                stream_id = result["stream_id"]
                event.internal_metadata.stream_ordering = stream_id
                return stream_id

            stream_id = await self.persist_and_notify_client_event(
                requester,
                event,
                context,
                ratelimit=ratelimit,
                extra_users=extra_users)

            return stream_id
        except Exception:
            # Ensure that we actually remove the entries in the push actions
            # staging area, if we calculated them.
            run_in_background(self.store.remove_push_actions_from_staging,
                              event.event_id)
            raise
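
The round-trip guard above just verifies that the event content survives JSON encoding before anything is persisted; with the standard json module standing in for frozendict_json_encoder, the idea is simply:

import json

content = {"msgtype": "m.text", "body": "hello"}
assert json.loads(json.dumps(content)) == content

try:
    json.dumps({"body": b"raw bytes"})  # non-JSON-serializable content fails fast here
except TypeError:
    pass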
Example #17
    async def _build_notification_dict(self, event: EventBase,
                                       tweaks: Dict[str, bool],
                                       badge: int) -> Dict[str, Any]:
        priority = "low"
        if (event.type == EventTypes.Encrypted or tweaks.get("highlight")
                or tweaks.get("sound")):
            # HACK send our push as high priority only if it generates a sound, highlight
            #  or may do so (i.e. is encrypted so has unknown effects).
            priority = "high"

        # This was checked in the __init__, but mypy doesn't seem to know that.
        assert self.data is not None
        if self.data.get("format") == "event_id_only":
            d = {
                "notification": {
                    "event_id": event.event_id,
                    "room_id": event.room_id,
                    "counts": {"unread": badge},
                    "prio": priority,
                    "devices": [
                        {
                            "app_id": self.app_id,
                            "pushkey": self.pushkey,
                            "pushkey_ts": int(self.pushkey_ts / 1000),
                            "data": self.data_minus_url,
                        }
                    ],
                }
            }
            return d

        ctx = await push_tools.get_context_for_event(self.storage, event,
                                                     self.user_id)

        d = {
            "notification": {
                "id": event.event_id,  # deprecated: remove soon
                "event_id": event.event_id,
                "room_id": event.room_id,
                "type": event.type,
                "sender": event.user_id,
                "prio": priority,
                "counts": {
                    "unread": badge,
                    # 'missed_calls': 2
                },
                "devices": [
                    {
                        "app_id": self.app_id,
                        "pushkey": self.pushkey,
                        "pushkey_ts": int(self.pushkey_ts / 1000),
                        "data": self.data_minus_url,
                        "tweaks": tweaks,
                    }
                ],
            }
        }
        if event.type == "m.room.member" and event.is_state():
            d["notification"]["membership"] = event.content["membership"]
            d["notification"][
                "user_is_target"] = event.state_key == self.user_id
        if self.hs.config.push.push_include_content and event.content:
            d["notification"]["content"] = event.content

        # We no longer send aliases separately, instead, we send the human
        # readable name of the room, which may be an alias.
        if "sender_display_name" in ctx and len(
                ctx["sender_display_name"]) > 0:
            d["notification"]["sender_display_name"] = ctx[
                "sender_display_name"]
        if "name" in ctx and len(ctx["name"]) > 0:
            d["notification"]["room_name"] = ctx["name"]

        return d
Example #18
    def allowed(event: EventBase) -> Optional[EventBase]:
        """
        Args:
            event: event to check

        Returns:
           None if the user cannot see this event at all

           a redacted copy of the event if they can only see a redacted
           version

           the original event if they can see it as normal.
        """
        # Some of these checks only apply when the events are about to be sent to
        # clients. Otherwise we're probably just checking whether users could see
        # events in the room at that point in the DAG, and that shouldn't be
        # decided by these checks.
        if filter_send_to_client:
            if event.type == EventTypes.Dummy:
                return None

            if not event.is_state() and event.sender in ignore_list:
                return None

            # Until MSC2261 has landed we can't redact malicious alias events, so for
            # now we temporarily filter out m.room.aliases entirely to mitigate
            # abuse, while we spec a better solution to advertising aliases
            # on rooms.
            if event.type == EventTypes.Aliases:
                return None

            # Don't try to apply the room's retention policy if the event is a state
            # event, as MSC1763 states that retention is only considered for non-state
            # events.
            if not event.is_state():
                retention_policy = retention_policies[event.room_id]
                max_lifetime = retention_policy.get("max_lifetime")

                if max_lifetime is not None:
                    oldest_allowed_ts = storage.main.clock.time_msec() - max_lifetime

                    if event.origin_server_ts < oldest_allowed_ts:
                        return None

        if event.event_id in always_include_ids:
            return event

        state = event_id_to_state[event.event_id]

        # get the room_visibility at the time of the event.
        visibility_event = state.get((EventTypes.RoomHistoryVisibility, ""),
                                     None)
        if visibility_event:
            visibility = visibility_event.content.get("history_visibility",
                                                      HistoryVisibility.SHARED)
        else:
            visibility = HistoryVisibility.SHARED

        if visibility not in VISIBILITY_PRIORITY:
            visibility = HistoryVisibility.SHARED

        # Always allow history visibility events on boundaries. This is done
        # by setting the effective visibility to the least restrictive
        # of the old vs new.
        if event.type == EventTypes.RoomHistoryVisibility:
            prev_content = event.unsigned.get("prev_content", {})
            prev_visibility = prev_content.get("history_visibility", None)

            if prev_visibility not in VISIBILITY_PRIORITY:
                prev_visibility = HistoryVisibility.SHARED

            new_priority = VISIBILITY_PRIORITY.index(visibility)
            old_priority = VISIBILITY_PRIORITY.index(prev_visibility)
            if old_priority < new_priority:
                visibility = prev_visibility

        # likewise, if the event is the user's own membership event, use
        # the 'most joined' membership
        membership = None
        if event.type == EventTypes.Member and event.state_key == user_id:
            membership = event.content.get("membership", None)
            if membership not in MEMBERSHIP_PRIORITY:
                membership = "leave"

            prev_content = event.unsigned.get("prev_content", {})
            prev_membership = prev_content.get("membership", None)
            if prev_membership not in MEMBERSHIP_PRIORITY:
                prev_membership = "leave"

            # Always allow the user to see their own leave events, otherwise
            # they won't see the room disappear if they reject the invite
            if membership == "leave" and (prev_membership == "join"
                                          or prev_membership == "invite"):
                return event

            new_priority = MEMBERSHIP_PRIORITY.index(membership)
            old_priority = MEMBERSHIP_PRIORITY.index(prev_membership)
            if old_priority < new_priority:
                membership = prev_membership

        # otherwise, get the user's membership at the time of the event.
        if membership is None:
            membership_event = state.get((EventTypes.Member, user_id), None)
            if membership_event:
                membership = membership_event.membership

        # if the user was a member of the room at the time of the event,
        # they can see it.
        if membership == Membership.JOIN:
            return event

        # otherwise, it depends on the room visibility.

        if visibility == HistoryVisibility.JOINED:
            # we weren't a member at the time of the event, so we can't
            # see this event.
            return None

        elif visibility == HistoryVisibility.INVITED:
            # user can also see the event if they were *invited* at the time
            # of the event.
            return event if membership == Membership.INVITE else None

        elif visibility == HistoryVisibility.SHARED and is_peeking:
            # if the visibility is shared, users cannot see the event unless
            # they have *subsequently* joined the room (or were members at the
            # time, of course)
            #
            # XXX: if the user has subsequently joined and then left again,
            # ideally we would share history up to the point they left. But
            # we don't know when they left. We just treat it as though they
            # never joined, and restrict access.
            return None

        # the visibility is either shared or world_readable, and the user was
        # not a member at the time. We allow it, provided the original sender
        # has not requested their data to be erased, in which case, we return
        # a redacted version.
        if erased_senders[event.sender]:
            return prune_event(event)

        return event
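
The "least restrictive of old vs new" rule applied at history-visibility boundaries can be shown in isolation; the ordering below is an assumption (least to most restrictive), not copied from Synapse's VISIBILITY_PRIORITY:

VISIBILITY_PRIORITY = ("world_readable", "shared", "invited", "joined")  # assumed ordering

def effective_visibility(new_visibility: str, prev_visibility: str) -> str:
    # On a boundary, use the less restrictive (lower-index) of the two values.
    new_index = VISIBILITY_PRIORITY.index(new_visibility)
    prev_index = VISIBILITY_PRIORITY.index(prev_visibility)
    return VISIBILITY_PRIORITY[min(new_index, prev_index)]

assert effective_visibility("joined", "shared") == "shared"
assert effective_visibility("shared", "world_readable") == "world_readable"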