Example #1
    async def check_event_allowed(self, event: EventBase,
                                  state: StateMap[EventBase]):
        d = event.get_dict()
        # event.content is a frozen (immutable) mapping; take a mutable copy.
        content = unfreeze(event.content)
        content["foo"] = "bar"
        d["content"] = content
        # Return the updated event dict so the modified content takes effect.
        return d
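All of these examples revolve around Synapse's `unfreeze` helper, which deep-copies a frozen (immutable) mapping back into plain mutable dicts and lists. A minimal sketch of why the copy is needed, assuming the `freeze`/`unfreeze` pair from `synapse.util.frozenutils`:

from synapse.util.frozenutils import freeze, unfreeze

content = freeze({"msgtype": "m.text", "body": "hello"})
# content["body"] = "edited"  # would raise TypeError: frozen mappings are immutable

mutable = unfreeze(content)   # recursive copy as plain dicts/lists
mutable["body"] = "edited"    # fine; the frozen original is untouched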
Example #2
    def _apply_edit(self, orig_event: EventBase, serialized_event: JsonDict,
                    edit: EventBase) -> None:
        """Replace the content, preserving existing relations of the serialized event.

        Args:
            orig_event: The original event.
            serialized_event: The original event, serialized. This is modified.
            edit: The event which edits the above.
        """

        # Ensure we take copies of the edit content, otherwise we risk modifying
        # the original event.
        edit_content = edit.content.copy()

        # Unfreeze the event content if necessary, so that we may modify it below
        edit_content = unfreeze(edit_content)
        serialized_event["content"] = edit_content.get("m.new_content", {})

        # Check for existing relations
        relates_to = orig_event.content.get("m.relates_to")
        if relates_to:
            # Keep the relations, ensuring we use a dict copy of the original
            serialized_event["content"]["m.relates_to"] = relates_to.copy()
        else:
            serialized_event["content"].pop("m.relates_to", None)
Example #3
def make_graph(file_name, room_id, file_prefix, limit):
    print "Reading lines"
    with open(file_name) as f:
        lines = f.readlines()

    print "Read lines"

    events = [FrozenEvent(json.loads(line)) for line in lines]
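    # (Each line is parsed into an immutable FrozenEvent; hence the unfreeze()
    # calls on its content below.)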

    print "Loaded events."

    events.sort(key=lambda e: e.depth)

    print "Sorted events"

    if limit:
        events = events[-int(limit):]

    node_map = {}

    graph = pydot.Dot(graph_name="Test")

    for event in events:
        t = datetime.datetime.fromtimestamp(
            float(event.origin_server_ts) / 1000
        ).strftime('%Y-%m-%d %H:%M:%S,%f')

        content = json.dumps(unfreeze(event.get_dict()["content"]), indent=4)
        content = content.replace("\n", "<br/>\n")

        print content
        content = []
        for key, value in unfreeze(event.get_dict()["content"]).items():
            if value is None:
                value = "<null>"
            elif isinstance(value, string_types):
                pass
            else:
                value = json.dumps(value)

            content.append(
                "<b>%s</b>: %s," % (
                    cgi.escape(key, quote=True).encode("ascii", 'xmlcharrefreplace'),
                    cgi.escape(value, quote=True).encode("ascii", 'xmlcharrefreplace'),
                )
            )

        content = "<br/>\n".join(content)

        print content

        label = (
            "<"
            "<b>%(name)s </b><br/>"
            "Type: <b>%(type)s </b><br/>"
            "State key: <b>%(state_key)s </b><br/>"
            "Content: <b>%(content)s </b><br/>"
            "Time: <b>%(time)s </b><br/>"
            "Depth: <b>%(depth)s </b><br/>"
            ">"
        ) % {
            "name": event.event_id,
            "type": event.type,
            "state_key": event.get("state_key", None),
            "content": content,
            "time": t,
            "depth": event.depth,
        }

        node = pydot.Node(
            name=event.event_id,
            label=label,
        )

        node_map[event.event_id] = node
        graph.add_node(node)

    print "Created Nodes"

    for event in events:
        for prev_id, _ in event.prev_events:
            try:
                end_node = node_map[prev_id]
            except Exception:
                end_node = pydot.Node(
                    name=prev_id,
                    label="<<b>%s</b>>" % (prev_id,),
                )

                node_map[prev_id] = end_node
                graph.add_node(end_node)

            edge = pydot.Edge(node_map[event.event_id], end_node)
            graph.add_edge(edge)

    print "Created edges"

    graph.write('%s.dot' % file_prefix, format='raw', prog='dot')

    print "Created Dot"

    graph.write_svg("%s.svg" % file_prefix, prog='dot')

    print "Created svg"
Example #4
def make_graph(db_name, room_id, file_prefix, limit):
    conn = sqlite3.connect(db_name)

    sql = ("SELECT json FROM event_json as j "
           "INNER JOIN events as e ON e.event_id = j.event_id "
           "WHERE j.room_id = ?")

    args = [room_id]

    if limit:
        sql += " ORDER BY topological_ordering DESC, stream_ordering DESC " "LIMIT ?"

        args.append(limit)

    c = conn.execute(sql, args)

    events = [FrozenEvent(json.loads(e[0])) for e in c.fetchall()]

    events.sort(key=lambda e: e.depth)

    node_map = {}
    state_groups = {}

    graph = pydot.Dot(graph_name="Test")

    for event in events:
        c = conn.execute(
            "SELECT state_group FROM event_to_state_groups "
            "WHERE event_id = ?",
            (event.event_id, ),
        )

        res = c.fetchone()
        state_group = res[0] if res else None

        if state_group is not None:
            state_groups.setdefault(state_group, []).append(event.event_id)

        t = datetime.datetime.fromtimestamp(
            float(event.origin_server_ts) /
            1000).strftime("%Y-%m-%d %H:%M:%S,%f")

        content = json.dumps(unfreeze(event.get_dict()["content"]))

        label = ("<"
                 "<b>%(name)s </b><br/>"
                 "Type: <b>%(type)s </b><br/>"
                 "State key: <b>%(state_key)s </b><br/>"
                 "Content: <b>%(content)s </b><br/>"
                 "Time: <b>%(time)s </b><br/>"
                 "Depth: <b>%(depth)s </b><br/>"
                 "State group: %(state_group)s<br/>"
                 ">") % {
                     "name": event.event_id,
                     "type": event.type,
                     "state_key": event.get("state_key", None),
                     "content": cgi.escape(content, quote=True),
                     "time": t,
                     "depth": event.depth,
                     "state_group": state_group,
                 }

        node = pydot.Node(name=event.event_id, label=label)

        node_map[event.event_id] = node
        graph.add_node(node)

    for event in events:
        for prev_id, _ in event.prev_events:
            try:
                end_node = node_map[prev_id]
            except Exception:
                end_node = pydot.Node(name=prev_id,
                                      label="<<b>%s</b>>" % (prev_id, ))

                node_map[prev_id] = end_node
                graph.add_node(end_node)

            edge = pydot.Edge(node_map[event.event_id], end_node)
            graph.add_edge(edge)

    for group, event_ids in state_groups.items():
        if len(event_ids) <= 1:
            continue

        cluster = pydot.Cluster(str(group),
                                label="<State Group: %s>" % (str(group), ))

        for event_id in event_ids:
            cluster.add_node(node_map[event_id])

        graph.add_subgraph(cluster)

    graph.write("%s.dot" % file_prefix, format="raw", prog="dot")
    graph.write_svg("%s.svg" % file_prefix, prog="dot")
Example #5
    @defer.inlineCallbacks
    def do_invite_join(self, target_hosts, room_id, joinee, content, snapshot):
        """ Attempts to join the `joinee` to the room `room_id` via one of
        the servers in `target_hosts`.

        This first triggers a /make_join/ request that returns a partial
        event that we can fill out and sign. This is then sent to the
        remote server via /send_join/ which responds with the state at that
        event and the auth_chains.

        We suspend processing of any received events from this room until we
        have finished processing the join.
        """
        logger.debug("Joining %s to %s", joinee, room_id)

        yield self.store.clean_room_for_join(room_id)

        origin, pdu = yield self.replication_layer.make_join(
            target_hosts,
            room_id,
            joinee
        )

        logger.debug("Got response to make_join: %s", pdu)

        event = pdu

        # We should assert some things.
        # FIXME: Do this in a nicer way
        assert(event.type == EventTypes.Member)
        assert(event.user_id == joinee)
        assert(event.state_key == joinee)
        assert(event.room_id == room_id)

        event.internal_metadata.outlier = False

        self.room_queues[room_id] = []
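        # (PDUs received for this room while the join is in flight are queued
        # here and replayed in the "finally" block below.)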

        builder = self.event_builder_factory.new(
            unfreeze(event.get_pdu_json())
        )
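        # (get_pdu_json() returns frozen content, so it is unfrozen before the
        # builder mutates event_id, origin and content below.)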

        handled_events = set()

        try:
            builder.event_id = self.event_builder_factory.create_event_id()
            builder.origin = self.hs.hostname
            builder.content = content

            if not hasattr(event, "signatures"):
                builder.signatures = {}

            add_hashes_and_signatures(
                builder,
                self.hs.hostname,
                self.hs.config.signing_key[0],
            )

            new_event = builder.build()

            # Try the host we successfully got a response to /make_join/
            # request first.
            try:
                target_hosts.remove(origin)
                target_hosts.insert(0, origin)
            except ValueError:
                pass

            ret = yield self.replication_layer.send_join(
                target_hosts,
                new_event
            )

            origin = ret["origin"]
            state = ret["state"]
            auth_chain = ret["auth_chain"]
            auth_chain.sort(key=lambda e: e.depth)

            handled_events.update([s.event_id for s in state])
            handled_events.update([a.event_id for a in auth_chain])
            handled_events.add(new_event.event_id)

            logger.debug("do_invite_join auth_chain: %s", auth_chain)
            logger.debug("do_invite_join state: %s", state)

            logger.debug("do_invite_join event: %s", new_event)

            try:
                yield self.store.store_room(
                    room_id=room_id,
                    room_creator_user_id="",
                    is_public=False
                )
            except Exception:
                # FIXME
                pass

            for e in auth_chain:
                e.internal_metadata.outlier = True

                if e.event_id == event.event_id:
                    continue

                try:
                    auth_ids = [e_id for e_id, _ in e.auth_events]
                    auth = {
                        (e.type, e.state_key): e for e in auth_chain
                        if e.event_id in auth_ids
                    }
                    yield self._handle_new_event(
                        origin, e, auth_events=auth
                    )
                except Exception:
                    logger.exception(
                        "Failed to handle auth event %s",
                        e.event_id,
                    )

            for e in state:
                if e.event_id == event.event_id:
                    continue

                e.internal_metadata.outlier = True
                try:
                    auth_ids = [e_id for e_id, _ in e.auth_events]
                    auth = {
                        (e.type, e.state_key): e for e in auth_chain
                        if e.event_id in auth_ids
                    }
                    yield self._handle_new_event(
                        origin, e, auth_events=auth
                    )
                except Exception:
                    logger.exception(
                        "Failed to handle state event %s",
                        e.event_id,
                    )

            auth_ids = [e_id for e_id, _ in event.auth_events]
            auth_events = {
                (e.type, e.state_key): e for e in auth_chain
                if e.event_id in auth_ids
            }

            yield self._handle_new_event(
                origin,
                new_event,
                state=state,
                current_state=state,
                auth_events=auth_events,
            )

            yield self.notifier.on_new_room_event(
                new_event, extra_users=[joinee]
            )

            logger.debug("Finished joining %s to %s", joinee, room_id)
        finally:
            room_queue = self.room_queues[room_id]
            del self.room_queues[room_id]

            for p, origin in room_queue:
                if p.event_id in handled_events:
                    continue

                try:
                    self.on_receive_pdu(origin, p, backfilled=False)
                except Exception:
                    logger.exception("Couldn't handle pdu")

        defer.returnValue(True)
Example #6
    @defer.inlineCallbacks
    def handle_new_client_event(self,
                                requester,
                                event,
                                context,
                                ratelimit=True,
                                extra_users=[]):
        # We now need to go and hit out to wherever we need to hit out to.

        if ratelimit:
            yield self.ratelimit(requester)

        try:
            yield self.auth.check_from_context(event, context)
        except AuthError as err:
            logger.warn("Denying new event %r because %s", event, err)
            raise err

        # Ensure that we can round trip before trying to persist in db
        try:
            dump = ujson.dumps(unfreeze(event.content))
            ujson.loads(dump)
        except Exception:
            logger.exception("Failed to encode content: %r", event.content)
            raise

        yield self.maybe_kick_guest_users(event, context)

        if event.type == EventTypes.CanonicalAlias:
            # Check the alias is actually valid (at this time at least)
            room_alias_str = event.content.get("alias", None)
            if room_alias_str:
                room_alias = RoomAlias.from_string(room_alias_str)
                directory_handler = self.hs.get_handlers().directory_handler
                mapping = yield directory_handler.get_association(room_alias)

                if mapping["room_id"] != event.room_id:
                    raise SynapseError(
                        400, "Room alias %s does not point to the room" %
                        (room_alias_str, ))

        federation_handler = self.hs.get_handlers().federation_handler

        if event.type == EventTypes.Member:
            if event.content["membership"] == Membership.INVITE:

                def is_inviter_member_event(e):
                    return (e.type == EventTypes.Member
                            and e.sender == event.sender)

                state_to_include_ids = [
                    e_id for k, e_id in context.current_state_ids.iteritems()
                    if k[0] in self.hs.config.room_invite_state_types or k == (
                        EventTypes.Member, event.sender)
                ]

                state_to_include = yield self.store.get_events(
                    state_to_include_ids)

                event.unsigned["invite_room_state"] = [{
                    "type": e.type,
                    "state_key": e.state_key,
                    "content": e.content,
                    "sender": e.sender,
                } for e in state_to_include.itervalues()]

                invitee = UserID.from_string(event.state_key)
                if not self.hs.is_mine(invitee):
                    # TODO: Can we add signature from remote server in a nicer
                    # way? If we have been invited by a remote server, we need
                    # to get them to sign the event.

                    returned_invite = yield federation_handler.send_invite(
                        invitee.domain,
                        event,
                    )

                    event.unsigned.pop("room_state", None)

                    # TODO: Make sure the signatures actually are correct.
                    event.signatures.update(returned_invite.signatures)

        if event.type == EventTypes.Redaction:
            auth_events_ids = yield self.auth.compute_auth_events(
                event,
                context.prev_state_ids,
                for_verification=True,
            )
            auth_events = yield self.store.get_events(auth_events_ids)
            auth_events = {(e.type, e.state_key): e
                           for e in auth_events.values()}
            if self.auth.check_redaction(event, auth_events=auth_events):
                original_event = yield self.store.get_event(
                    event.redacts,
                    check_redacted=False,
                    get_prev_content=False,
                    allow_rejected=False,
                    allow_none=False)
                if event.user_id != original_event.user_id:
                    raise AuthError(
                        403, "You don't have permission to redact events")

        if event.type == EventTypes.Create and context.prev_state_ids:
            raise AuthError(
                403,
                "Changing the room create event is forbidden",
            )

        yield self.action_generator.handle_push_actions_for_event(
            event, context)

        (event_stream_id,
         max_stream_id) = yield self.store.persist_event(event,
                                                         context=context)

        # this intentionally does not yield: we don't care about the result
        # and don't need to wait for it.
        preserve_fn(self.pusher_pool.on_new_notifications)(event_stream_id,
                                                           max_stream_id)

        @defer.inlineCallbacks
        def _notify():
            yield run_on_reactor()
            self.notifier.on_new_room_event(event,
                                            event_stream_id,
                                            max_stream_id,
                                            extra_users=extra_users)

        preserve_fn(_notify)()
Example #7
    async def serialize_event(self,
                              event,
                              time_now,
                              bundle_aggregations=True,
                              **kwargs):
        """Serializes a single event.

        Args:
            event (EventBase)
            time_now (int): The current time in milliseconds
            bundle_aggregations (bool): Whether to bundle in related events
            **kwargs: Arguments to pass to `serialize_event`

        Returns:
            dict: The serialized event
        """
        # To handle the case of presence events and the like
        if not isinstance(event, EventBase):
            return event

        event_id = event.event_id
        serialized_event = serialize_event(event, time_now, **kwargs)

        # If MSC1849 is enabled then we need to look if there are any relations
        # we need to bundle in with the event.
        # Do not bundle relations if the event has been redacted
        if not event.internal_metadata.is_redacted() and (
                self.experimental_msc1849_support_enabled
                and bundle_aggregations):
            annotations = await self.store.get_aggregation_groups_for_event(
                event_id)
            references = await self.store.get_relations_for_event(
                event_id, RelationTypes.REFERENCE, direction="f")

            if annotations.chunk:
                r = serialized_event["unsigned"].setdefault("m.relations", {})
                r[RelationTypes.ANNOTATION] = annotations.to_dict()

            if references.chunk:
                r = serialized_event["unsigned"].setdefault("m.relations", {})
                r[RelationTypes.REFERENCE] = references.to_dict()

            edit = None
            if event.type == EventTypes.Message:
                edit = await self.store.get_applicable_edit(event_id)

            if edit:
                # If there is an edit replace the content, preserving existing
                # relations.

                # Ensure we take copies of the edit content, otherwise we risk modifying
                # the original event.
                edit_content = edit.content.copy()

                # Unfreeze the event content if necessary, so that we may modify it below
                edit_content = unfreeze(edit_content)
                serialized_event["content"] = edit_content.get(
                    "m.new_content", {})

                # Check for existing relations
                relations = event.content.get("m.relates_to")
                if relations:
                    # Keep the relations, ensuring we use a dict copy of the original
                    serialized_event["content"][
                        "m.relates_to"] = relations.copy()
                else:
                    serialized_event["content"].pop("m.relates_to", None)

                r = serialized_event["unsigned"].setdefault("m.relations", {})
                r[RelationTypes.REPLACE] = {
                    "event_id": edit.event_id,
                    "origin_server_ts": edit.origin_server_ts,
                    "sender": edit.sender,
                }

        return serialized_event
Example #8
    def _inject_bundled_aggregations(
        self,
        event: EventBase,
        time_now: int,
        aggregations: "BundledAggregations",
        serialized_event: JsonDict,
    ) -> None:
        """Potentially injects bundled aggregations into the unsigned portion of the serialized event.

        Args:
            event: The event being serialized.
            time_now: The current time in milliseconds
            aggregations: The bundled aggregation to serialize.
            serialized_event: The serialized event which may be modified.

        """
        serialized_aggregations = {}

        if aggregations.annotations:
            serialized_aggregations[RelationTypes.ANNOTATION] = aggregations.annotations

        if aggregations.references:
            serialized_aggregations[RelationTypes.REFERENCE] = aggregations.references

        if aggregations.replace:
            # If there is an edit replace the content, preserving existing
            # relations.
            edit = aggregations.replace

            # Ensure we take copies of the edit content, otherwise we risk modifying
            # the original event.
            edit_content = edit.content.copy()

            # Unfreeze the event content if necessary, so that we may modify it below
            edit_content = unfreeze(edit_content)
            serialized_event["content"] = edit_content.get("m.new_content", {})

            # Check for existing relations
            relates_to = event.content.get("m.relates_to")
            if relates_to:
                # Keep the relations, ensuring we use a dict copy of the original
                serialized_event["content"]["m.relates_to"] = relates_to.copy()
            else:
                serialized_event["content"].pop("m.relates_to", None)

            serialized_aggregations[RelationTypes.REPLACE] = {
                "event_id": edit.event_id,
                "origin_server_ts": edit.origin_server_ts,
                "sender": edit.sender,
            }

        # If this event is the start of a thread, include a summary of the replies.
        if aggregations.thread:
            serialized_aggregations[RelationTypes.THREAD] = {
                # Don't bundle aggregations as this could recurse forever.
                "latest_event": self.serialize_event(
                    aggregations.thread.latest_event, time_now, bundle_aggregations=None
                ),
                "count": aggregations.thread.count,
                "current_user_participated": aggregations.thread.current_user_participated,
            }

        # Include the bundled aggregations in the event.
        if serialized_aggregations:
            serialized_event["unsigned"].setdefault("m.relations", {}).update(
                serialized_aggregations
            )
Example #9
    def get_dict(self):
        # We need to unfreeze what we return
        return unfreeze(super(FrozenEvent, self).get_dict())
Example #10
    @defer.inlineCallbacks
    def do_invite_join(self, target_hosts, room_id, joinee, content, snapshot):
        """ Attempts to join the `joinee` to the room `room_id` via one of
        the servers in `target_hosts`.

        This first triggers a /make_join/ request that returns a partial
        event that we can fill out and sign. This is then sent to the
        remote server via /send_join/ which responds with the state at that
        event and the auth_chains.

        We suspend processing of any received events from this room until we
        have finished processing the join.
        """
        logger.debug("Joining %s to %s", joinee, room_id)

        origin, pdu = yield self.replication_layer.make_join(
            target_hosts,
            room_id,
            joinee
        )

        logger.debug("Got response to make_join: %s", pdu)

        event = pdu

        # We should assert some things.
        # FIXME: Do this in a nicer way
        assert(event.type == EventTypes.Member)
        assert(event.user_id == joinee)
        assert(event.state_key == joinee)
        assert(event.room_id == room_id)

        event.internal_metadata.outlier = False

        self.room_queues[room_id] = []

        builder = self.event_builder_factory.new(
            unfreeze(event.get_pdu_json())
        )

        handled_events = set()

        try:
            builder.event_id = self.event_builder_factory.create_event_id()
            builder.origin = self.hs.hostname
            builder.content = content

            if not hasattr(event, "signatures"):
                builder.signatures = {}

            add_hashes_and_signatures(
                builder,
                self.hs.hostname,
                self.hs.config.signing_key[0],
            )

            new_event = builder.build()

            # Try the host we successfully got a response to /make_join/
            # request first.
            try:
                target_hosts.remove(origin)
                target_hosts.insert(0, origin)
            except ValueError:
                pass

            ret = yield self.replication_layer.send_join(
                target_hosts,
                new_event
            )

            origin = ret["origin"]
            state = ret["state"]
            auth_chain = ret["auth_chain"]
            auth_chain.sort(key=lambda e: e.depth)

            handled_events.update([s.event_id for s in state])
            handled_events.update([a.event_id for a in auth_chain])
            handled_events.add(new_event.event_id)

            logger.debug("do_invite_join auth_chain: %s", auth_chain)
            logger.debug("do_invite_join state: %s", state)

            logger.debug("do_invite_join event: %s", new_event)

            try:
                yield self.store.store_room(
                    room_id=room_id,
                    room_creator_user_id="",
                    is_public=False
                )
            except Exception:
                # FIXME
                pass

            for e in auth_chain:
                e.internal_metadata.outlier = True

                if e.event_id == event.event_id:
                    continue

                try:
                    auth_ids = [e_id for e_id, _ in e.auth_events]
                    auth = {
                        (e.type, e.state_key): e for e in auth_chain
                        if e.event_id in auth_ids
                    }
                    yield self._handle_new_event(
                        origin, e, auth_events=auth
                    )
                except Exception:
                    logger.exception(
                        "Failed to handle auth event %s",
                        e.event_id,
                    )

            for e in state:
                if e.event_id == event.event_id:
                    continue

                e.internal_metadata.outlier = True
                try:
                    auth_ids = [e_id for e_id, _ in e.auth_events]
                    auth = {
                        (e.type, e.state_key): e for e in auth_chain
                        if e.event_id in auth_ids
                    }
                    yield self._handle_new_event(
                        origin, e, auth_events=auth
                    )
                except Exception:
                    logger.exception(
                        "Failed to handle state event %s",
                        e.event_id,
                    )

            auth_ids = [e_id for e_id, _ in event.auth_events]
            auth_events = {
                (e.type, e.state_key): e for e in auth_chain
                if e.event_id in auth_ids
            }

            yield self._handle_new_event(
                origin,
                new_event,
                state=state,
                current_state=state,
                auth_events=auth_events,
            )

            yield self.notifier.on_new_room_event(
                new_event, extra_users=[joinee]
            )

            logger.debug("Finished joining %s to %s", joinee, room_id)
        finally:
            room_queue = self.room_queues[room_id]
            del self.room_queues[room_id]

            for p, origin in room_queue:
                if p.event_id in handled_events:
                    continue

                try:
                    self.on_receive_pdu(origin, p, backfilled=False)
                except Exception:
                    logger.exception("Couldn't handle pdu")

        defer.returnValue(True)
Example #11
def make_graph(db_name: str, room_id: str, file_prefix: str,
               limit: int) -> None:
    """
    Generate a dot and SVG file for a graph of events in the room based on the
    topological ordering by reading from a Synapse SQLite database.
    """
    conn = sqlite3.connect(db_name)

    sql = "SELECT room_version FROM rooms WHERE room_id = ?"
    c = conn.execute(sql, (room_id, ))
    room_version = KNOWN_ROOM_VERSIONS[c.fetchone()[0]]
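    # (KNOWN_ROOM_VERSIONS maps the version string to the RoomVersion object
    # that make_event_from_dict() expects.)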

    sql = ("SELECT json, internal_metadata FROM event_json as j "
           "INNER JOIN events as e ON e.event_id = j.event_id "
           "WHERE j.room_id = ?")

    args = [room_id]

    if limit:
        sql += " ORDER BY topological_ordering DESC, stream_ordering DESC LIMIT ?"

        args.append(limit)

    c = conn.execute(sql, args)

    events = [
        make_event_from_dict(json.loads(e[0]), room_version, json.loads(e[1]))
        for e in c.fetchall()
    ]

    events.sort(key=lambda e: e.depth)

    node_map = {}
    state_groups = {}

    graph = pydot.Dot(graph_name="Test")

    for event in events:
        c = conn.execute(
            "SELECT state_group FROM event_to_state_groups WHERE event_id = ?",
            (event.event_id, ),
        )

        res = c.fetchone()
        state_group = res[0] if res else None

        if state_group is not None:
            state_groups.setdefault(state_group, []).append(event.event_id)

        t = datetime.datetime.fromtimestamp(
            float(event.origin_server_ts) /
            1000).strftime("%Y-%m-%d %H:%M:%S,%f")

        content = json.dumps(unfreeze(event.get_dict()["content"]))

        label = ("<"
                 "<b>%(name)s </b><br/>"
                 "Type: <b>%(type)s </b><br/>"
                 "State key: <b>%(state_key)s </b><br/>"
                 "Content: <b>%(content)s </b><br/>"
                 "Time: <b>%(time)s </b><br/>"
                 "Depth: <b>%(depth)s </b><br/>"
                 "State group: %(state_group)s<br/>"
                 ">") % {
                     "name": event.event_id,
                     "type": event.type,
                     "state_key": event.get("state_key", None),
                     "content": html.escape(content, quote=True),
                     "time": t,
                     "depth": event.depth,
                     "state_group": state_group,
                 }

        node = pydot.Node(name=event.event_id, label=label)

        node_map[event.event_id] = node
        graph.add_node(node)

    for event in events:
        for prev_id in event.prev_event_ids():
            try:
                end_node = node_map[prev_id]
            except Exception:
                end_node = pydot.Node(name=prev_id,
                                      label=f"<<b>{prev_id}</b>>")

                node_map[prev_id] = end_node
                graph.add_node(end_node)

            edge = pydot.Edge(node_map[event.event_id], end_node)
            graph.add_edge(edge)

    for group, event_ids in state_groups.items():
        if len(event_ids) <= 1:
            continue

        cluster = pydot.Cluster(str(group),
                                label=f"<State Group: {str(group)}>")

        for event_id in event_ids:
            cluster.add_node(node_map[event_id])

        graph.add_subgraph(cluster)

    graph.write("%s.dot" % file_prefix, format="raw", prog="dot")
    graph.write_svg("%s.svg" % file_prefix, prog="dot")
Example #12
def make_graph(file_name: str, file_prefix: str, limit: int) -> None:
    """
    Generate a dot and SVG file for a graph of events in the room based on the
    topological ordering by reading line-delimited JSON from a file.
    """
    print("Reading lines")
    with open(file_name) as f:
        lines = f.readlines()

    print("Read lines")

    # Figure out the room version, assume the first line is the create event.
    room_version = KNOWN_ROOM_VERSIONS[json.loads(lines[0]).get(
        "content", {}).get("room_version")]

    events = [
        make_event_from_dict(json.loads(line), room_version) for line in lines
    ]

    print("Loaded events.")

    events.sort(key=lambda e: e.depth)

    print("Sorted events")

    if limit:
        events = events[-int(limit):]

    node_map = {}

    graph = pydot.Dot(graph_name="Test")

    for event in events:
        t = datetime.datetime.fromtimestamp(
            float(event.origin_server_ts) /
            1000).strftime("%Y-%m-%d %H:%M:%S,%f")

        content = json.dumps(unfreeze(event.get_dict()["content"]), indent=4)
        content = content.replace("\n", "<br/>\n")

        print(content)
        content = []
        for key, value in unfreeze(event.get_dict()["content"]).items():
            if value is None:
                value = "<null>"
            elif isinstance(value, str):
                pass
            else:
                value = json.dumps(value)

            # encode() returns bytes; decode back to str so "%s" does not
            # render a b'...' literal in the label.
            content.append("<b>%s</b>: %s," % (
                html.escape(key, quote=True).encode(
                    "ascii", "xmlcharrefreplace").decode("ascii"),
                html.escape(value, quote=True).encode(
                    "ascii", "xmlcharrefreplace").decode("ascii"),
            ))

        content = "<br/>\n".join(content)

        print(content)

        label = ("<"
                 "<b>%(name)s </b><br/>"
                 "Type: <b>%(type)s </b><br/>"
                 "State key: <b>%(state_key)s </b><br/>"
                 "Content: <b>%(content)s </b><br/>"
                 "Time: <b>%(time)s </b><br/>"
                 "Depth: <b>%(depth)s </b><br/>"
                 ">") % {
                     "name": event.event_id,
                     "type": event.type,
                     "state_key": event.get("state_key", None),
                     "content": content,
                     "time": t,
                     "depth": event.depth,
                 }

        node = pydot.Node(name=event.event_id, label=label)

        node_map[event.event_id] = node
        graph.add_node(node)

    print("Created Nodes")

    for event in events:
        for prev_id in event.prev_event_ids():
            try:
                end_node = node_map[prev_id]
            except Exception:
                end_node = pydot.Node(name=prev_id,
                                      label=f"<<b>{prev_id}</b>>")

                node_map[prev_id] = end_node
                graph.add_node(end_node)

            edge = pydot.Edge(node_map[event.event_id], end_node)
            graph.add_edge(edge)

    print("Created edges")

    graph.write("%s.dot" % file_prefix, format="raw", prog="dot")

    print("Created Dot")

    graph.write_svg("%s.svg" % file_prefix, prog="dot")

    print("Created svg")
Example #13
    @defer.inlineCallbacks
    def handle_new_client_event(
        self,
        requester,
        event,
        context,
        ratelimit=True,
        extra_users=[],
    ):
        """Processes a new event. This includes checking auth, persisting it,
        notifying users, sending to remote servers, etc.

        If called from a worker will hit out to the master process for final
        processing.

        Args:
            requester (Requester)
            event (FrozenEvent)
            context (EventContext)
            ratelimit (bool)
            extra_users (list(UserID)): Any extra users to notify about event
        """

        try:
            yield self.auth.check_from_context(event, context)
        except AuthError as err:
            logger.warn("Denying new event %r because %s", event, err)
            raise err

        # Ensure that we can round trip before trying to persist in db
        try:
            dump = simplejson.dumps(unfreeze(event.content))
            simplejson.loads(dump)
        except Exception:
            logger.exception("Failed to encode content: %r", event.content)
            raise

        yield self.action_generator.handle_push_actions_for_event(
            event, context)

        try:
            # If we're a worker we need to hit out to the master.
            if self.config.worker_app:
                yield send_event_to_master(
                    self.http_client,
                    host=self.config.worker_replication_host,
                    port=self.config.worker_replication_http_port,
                    requester=requester,
                    event=event,
                    context=context,
                    ratelimit=ratelimit,
                    extra_users=extra_users,
                )
                return

            yield self.persist_and_notify_client_event(
                requester,
                event,
                context,
                ratelimit=ratelimit,
                extra_users=extra_users,
            )
        except:  # noqa: E722, as we reraise the exception this is fine.
            # Ensure that we actually remove the entries in the push actions
            # staging area, if we calculated them.
            preserve_fn(self.store.remove_push_actions_from_staging)(
                event.event_id)
            raise
Example #14
    async def _injected_bundled_aggregations(
            self, event: EventBase, time_now: int,
            serialized_event: JsonDict) -> None:
        """Potentially injects bundled aggregations into the unsigned portion of the serialized event.

        Args:
            event: The event being serialized.
            time_now: The current time in milliseconds
            serialized_event: The serialized event which may be modified.

        """
        # Do not bundle aggregations for an event which represents an edit or an
        # annotation. It does not make sense for them to have related events.
        relates_to = event.content.get("m.relates_to")
        if isinstance(relates_to, (dict, frozendict)):
            relation_type = relates_to.get("rel_type")
            if relation_type in (RelationTypes.ANNOTATION,
                                 RelationTypes.REPLACE):
                return

        event_id = event.event_id

        # The bundled aggregations to include.
        aggregations = {}

        annotations = await self.store.get_aggregation_groups_for_event(
            event_id)
        if annotations.chunk:
            aggregations[RelationTypes.ANNOTATION] = annotations.to_dict()

        references = await self.store.get_relations_for_event(
            event_id, RelationTypes.REFERENCE, direction="f")
        if references.chunk:
            aggregations[RelationTypes.REFERENCE] = references.to_dict()

        edit = None
        if event.type == EventTypes.Message:
            edit = await self.store.get_applicable_edit(event_id)

        if edit:
            # If there is an edit replace the content, preserving existing
            # relations.

            # Ensure we take copies of the edit content, otherwise we risk modifying
            # the original event.
            edit_content = edit.content.copy()

            # Unfreeze the event content if necessary, so that we may modify it below
            edit_content = unfreeze(edit_content)
            serialized_event["content"] = edit_content.get("m.new_content", {})

            # Check for existing relations
            relates_to = event.content.get("m.relates_to")
            if relates_to:
                # Keep the relations, ensuring we use a dict copy of the original
                serialized_event["content"]["m.relates_to"] = relates_to.copy()
            else:
                serialized_event["content"].pop("m.relates_to", None)

            aggregations[RelationTypes.REPLACE] = {
                "event_id": edit.event_id,
                "origin_server_ts": edit.origin_server_ts,
                "sender": edit.sender,
            }

        # If this event is the start of a thread, include a summary of the replies.
        if self._msc3440_enabled:
            (
                thread_count,
                latest_thread_event,
            ) = await self.store.get_thread_summary(event_id)
            if latest_thread_event:
                aggregations[RelationTypes.THREAD] = {
                    # Don't bundle aggregations as this could recurse forever.
                    "latest_event":
                    await self.serialize_event(latest_thread_event,
                                               time_now,
                                               bundle_aggregations=False),
                    "count":
                    thread_count,
                }

        # If any bundled aggregations were found, include them.
        if aggregations:
            serialized_event["unsigned"].setdefault("m.relations",
                                                    {}).update(aggregations)