def _inject_state_event(
    self,
    body: Optional[str] = None,
    state_key: Optional[str] = None,
    sender: Optional[str] = None,
) -> EventBase:
    """Inject a ``test_state_event`` into ``self.room_id`` and return it.

    Any of body/state_key/sender left as None is filled in with a default:
    the sender defaults to ``self.user_id``, the state key is generated from
    the running event counter, and the body is derived from the state key.
    """
    sender = self.user_id if sender is None else sender

    if state_key is None:
        # Generate a unique state key and advance the counter so the next
        # auto-generated event gets a fresh one.
        state_key = "state_%i" % (self.event_count, )
        self.event_count += 1

    if body is None:
        body = "state event %s" % (state_key, )

    injection = inject_event(
        self.hs,
        room_id=self.room_id,
        sender=sender,
        type="test_state_event",
        state_key=state_key,
        content={"body": body},
    )
    return self.get_success(injection)
def test_get_joined_users_from_context(self):
    """Check get_joined_users_from_context with and without a confusing state event."""
    room_id = self.helper.create_room_as(self.u_alice, tok=self.t_alice)
    bob_join = self.get_success(
        event_injection.inject_member_event(
            self.hs, room_id, self.u_bob, Membership.JOIN
        )
    )

    # A plain (non-state) event: both alice and bob should be reported joined.
    ev, ctx = self.get_success(
        event_injection.create_event(
            self.hs,
            room_id=room_id,
            sender=self.u_alice,
            prev_event_ids=[bob_join.event_id],
            type="m.test.1",
            content={},
        )
    )
    joined = self.get_success(self.store.get_joined_users_from_context(ev, ctx))
    self.assertEqual(joined.keys(), {self.u_alice, self.u_bob})

    # Regression test for #7376: persist a state event whose state_key matches
    # bob's user_id but which is *not* a membership event, then check that
    # `get_joined_users_from_context` still returns the correct users for the
    # next event.
    non_member_event = self.get_success(
        event_injection.inject_event(
            self.hs,
            room_id=room_id,
            sender=self.u_bob,
            prev_event_ids=[bob_join.event_id],
            type="m.test.2",
            state_key=self.u_bob,
            content={},
        )
    )
    ev, ctx = self.get_success(
        event_injection.create_event(
            self.hs,
            room_id=room_id,
            sender=self.u_alice,
            prev_event_ids=[non_member_event.event_id],
            type="m.test.3",
            content={},
        )
    )
    joined = self.get_success(self.store.get_joined_users_from_context(ev, ctx))
    self.assertEqual(joined.keys(), {self.u_alice, self.u_bob})
def test_not_latest_event(self):
    """Test that we send the latest event in the room even if its not ours."""
    per_dest_queue, sent_pdus = self.make_fake_destination_queue()

    # One local user plus two remote servers: host2 will be treated as
    # offline while host3 keeps sending events.
    self.register_user("u1", "you the one")
    u1_token = self.login("u1", "you the one")
    room_1 = self.helper.create_room_as("u1", tok=u1_token)

    self.get_success(
        event_injection.inject_member_event(self.hs, room_1, "@user:host2", "join")
    )
    host3_join = self.get_success(
        event_injection.inject_member_event(self.hs, room_1, "@user:host3", "join")
    )

    # A local send makes us notice that host2 is down and enter catchup mode.
    self.helper.send(room_1, "you hear me!!", tok=u1_token)

    # Simulate receiving an event from the still-online host3.
    remote_event = self.get_success(
        event_injection.inject_event(
            self.hs,
            type=EventTypes.Message,
            sender="@user:host3",
            room_id=room_1,
            content={"msgtype": "m.text", "body": "Hello"},
        )
    )

    # Pretend host2 last successfully saw host3's join event.
    self.get_success(
        self.hs.get_datastore().set_destination_last_successful_stream_ordering(
            "host2", host3_join.internal_metadata.stream_ordering
        )
    )

    self.get_success(per_dest_queue._catch_up_transmission_loop())

    # Only the remote's latest message should have been sent, rather than the
    # last *local* event that was sent.
    self.assertEqual(len(sent_pdus), 1)
    self.assertEqual(sent_pdus[0].event_id, remote_event.event_id)
    self.assertFalse(per_dest_queue._catching_up)
def _inject_test_event(
    self, body: Optional[str] = None, sender: Optional[str] = None, **kwargs
) -> EventBase:
    """Inject a ``test_event`` into ``self.room_id`` and return it.

    The sender defaults to ``self.user_id``; an omitted body is generated
    from the running event counter. Extra keyword arguments are passed
    through to ``inject_event``.
    """
    if sender is None:
        sender = self.user_id

    if body is None:
        body = "event %i" % (self.event_count, )
        self.event_count += 1

    # Fix: inject_event returns an awaitable, not an EventBase. It must be
    # resolved via get_success (as _inject_state_event does), otherwise
    # callers receive a coroutine despite the declared return type.
    return self.get_success(
        inject_event(
            self.hs,
            room_id=self.room_id,
            sender=sender,
            type="test_event",
            content={"body": body},
            **kwargs,
        )
    )
def send_example_state_events_to_room(
    self,
    hs: "HomeServer",
    room_id: str,
    sender: str,
) -> OrderedDict:
    """Adds some state to a room. State events are those that should be sent to a knocking
    user after they knock on the room, as well as some state that *shouldn't* be sent
    to the knocking user.

    Args:
        hs: The homeserver of the sender.
        room_id: The ID of the room to send state into.
        sender: The ID of the user to send state as. Must be in the room.

    Returns:
        The OrderedDict of event types and content that a user is expected to see
        after knocking on a room.
    """
    # A canonical alias requires an alias association pointing at the room first.
    canonical_alias = "#fancy_alias:test"
    self.get_success(
        self.store.create_room_alias_association(
            RoomAlias.from_string(canonical_alias), room_id, ["test"]
        )
    )

    # Send some state that we *don't* expect to be given to knocking users.
    self.get_success(
        event_injection.inject_event(
            hs,
            room_version=RoomVersions.V7.identifier,
            room_id=room_id,
            sender=sender,
            type="com.example.secret",
            state_key="",
            content={"secret": "password"},
        )
    )

    # Insertion order matters when sending stripped state to clients (the
    # knock membership should appear last), not to federated homeservers —
    # hence an OrderedDict, populated in the order we want it seen.
    room_state = OrderedDict()

    # The room's join rules must allow knocking in the first place.
    room_state[EventTypes.JoinRules] = {
        "content": {"join_rule": JoinRules.KNOCK},
        "state_key": "",
    }

    # Below are state events that are to be stripped and sent to clients.
    room_state[EventTypes.Name] = {
        "content": {"name": "A cool room"},
        "state_key": "",
    }
    room_state[EventTypes.RoomAvatar] = {
        "content": {
            "info": {
                "h": 398,
                "mimetype": "image/jpeg",
                "size": 31037,
                "w": 394,
            },
            "url": "mxc://example.org/JWEIFJgwEIhweiWJE",
        },
        "state_key": "",
    }
    room_state[EventTypes.RoomEncryption] = {
        "content": {"algorithm": "m.megolm.v1.aes-sha2"},
        "state_key": "",
    }
    room_state[EventTypes.CanonicalAlias] = {
        "content": {"alias": canonical_alias, "alt_aliases": []},
        "state_key": "",
    }
    room_state[EventTypes.Topic] = {
        "content": {
            "topic": "A really cool room",
        },
        "state_key": "",
    }

    # Actually inject each of the above into the room.
    for event_type, event_dict in room_state.items():
        self.get_success(
            event_injection.inject_event(
                hs,
                room_version=RoomVersions.V7.identifier,
                room_id=room_id,
                sender=sender,
                type=event_type,
                state_key=event_dict["state_key"],
                content=event_dict["content"],
            )
        )

    # Finally, we expect to see the m.room.create event of the room as part of
    # the stripped state. The room already has one, so it is not injected.
    room_state[EventTypes.Create] = {
        "content": {
            "creator": sender,
            "room_version": RoomVersions.V7.identifier,
        },
        "state_key": "",
    }

    return room_state
def test_ignore_invalid_room(self):
    """Test that we ignore invalid relations over federation.

    Relation events are injected (with validation patched out, as federation
    would bypass it) that point at a parent event in a *different* room; the
    relations/aggregations APIs for the parent's room must not surface them.
    """
    # Create another room and send a message in it.
    room2 = self.helper.create_room_as(self.user_id, tok=self.user_token)
    res = self.helper.send(room2, body="Hi!", tok=self.user_token)
    parent_id = res["event_id"]

    # Disable the validation to pretend this came over federation.
    with patch(
        "synapse.handlers.message.EventCreationHandler._validate_event_relation",
        new=lambda self, event: make_awaitable(None),
    ):
        # Generate a various relations from a different room.
        self.get_success(
            inject_event(
                self.hs,
                room_id=self.room,
                type="m.reaction",
                sender=self.user_id,
                content={
                    "m.relates_to": {
                        "rel_type": RelationTypes.ANNOTATION,
                        "event_id": parent_id,
                        "key": "A",
                    }
                },
            )
        )

        self.get_success(
            inject_event(
                self.hs,
                room_id=self.room,
                type="m.room.message",
                sender=self.user_id,
                content={
                    "body": "foo",
                    "msgtype": "m.text",
                    "m.relates_to": {
                        "rel_type": RelationTypes.REFERENCE,
                        "event_id": parent_id,
                    },
                },
            )
        )

        self.get_success(
            inject_event(
                self.hs,
                room_id=self.room,
                type="m.room.message",
                sender=self.user_id,
                content={
                    "body": "foo",
                    "msgtype": "m.text",
                    "m.relates_to": {
                        "rel_type": RelationTypes.THREAD,
                        "event_id": parent_id,
                    },
                },
            )
        )

        self.get_success(
            inject_event(
                self.hs,
                room_id=self.room,
                type="m.room.message",
                sender=self.user_id,
                content={
                    "body": "foo",
                    "msgtype": "m.text",
                    "new_content": {
                        "body": "new content",
                        "msgtype": "m.text",
                    },
                    "m.relates_to": {
                        "rel_type": RelationTypes.REPLACE,
                        "event_id": parent_id,
                    },
                },
            )
        )

    # They should be ignored when fetching relations.
    # (Fix: assertEquals is a deprecated alias, removed in Python 3.12;
    # use assertEqual throughout.)
    channel = self.make_request(
        "GET",
        f"/_matrix/client/unstable/rooms/{room2}/relations/{parent_id}",
        access_token=self.user_token,
    )
    self.assertEqual(200, channel.code, channel.json_body)
    self.assertEqual(channel.json_body["chunk"], [])

    # And when fetching aggregations.
    channel = self.make_request(
        "GET",
        f"/_matrix/client/unstable/rooms/{room2}/aggregations/{parent_id}",
        access_token=self.user_token,
    )
    self.assertEqual(200, channel.code, channel.json_body)
    self.assertEqual(channel.json_body["chunk"], [])

    # And for bundled aggregations.
    channel = self.make_request(
        "GET",
        f"/rooms/{room2}/event/{parent_id}",
        access_token=self.user_token,
    )
    self.assertEqual(200, channel.code, channel.json_body)
    self.assertNotIn("m.relations", channel.json_body["unsigned"])
def test_update_function_state_row_limit(self): """Test replication with many state events over several stream ids.""" # we want to generate lots of state changes, but for this test, we want to # spread out the state changes over a few stream IDs. # # We do this by having two branches in the DAG. On one, we have four moderators, # each of which that generates lots of state; on the other, we de-op the users, # thus invalidating all the state. NUM_USERS = 4 STATES_PER_USER = _STREAM_UPDATE_TARGET_ROW_COUNT // 4 + 1 user_ids = ["@user%i:localhost" % (i, ) for i in range(NUM_USERS)] # have the users join for u in user_ids: self.get_success( inject_member_event(self.hs, self.room_id, u, Membership.JOIN)) # Update existing power levels with mod at PL50 pls = self.helper.get_state(self.room_id, EventTypes.PowerLevels, tok=self.user_tok) pls["users"].update({u: 50 for u in user_ids}) self.helper.send_state( self.room_id, EventTypes.PowerLevels, pls, tok=self.user_tok, ) # this is the point in the DAG where we make a fork fork_point: List[str] = self.get_success( self.hs.get_datastores().main.get_latest_event_ids_in_room( self.room_id)) events: List[EventBase] = [] for user in user_ids: events.extend( self._inject_state_event(sender=user) for _ in range(STATES_PER_USER)) self.replicate() # all those events and state changes should have landed self.assertGreaterEqual(len(self.test_handler.received_rdata_rows), 2 * len(events)) # disconnect, so that we can stack up the changes self.disconnect() self.test_handler.received_rdata_rows.clear() # now roll back all that state by de-modding the users prev_events = fork_point pl_events = [] for u in user_ids: pls["users"][u] = 0 e = self.get_success( inject_event( self.hs, prev_event_ids=prev_events, type=EventTypes.PowerLevels, state_key="", sender=self.user_id, room_id=self.room_id, content=pls, )) prev_events = [e.event_id] pl_events.append(e) # check we're testing what we think we are: no rows should yet have been # received 
self.assertEqual([], self.test_handler.received_rdata_rows) # now reconnect to pull the updates self.reconnect() self.replicate() # we should have received all the expected rows in the right order (as # well as various cache invalidation updates which we ignore) received_rows = [ row for row in self.test_handler.received_rdata_rows if row[0] == "events" ] self.assertGreaterEqual(len(received_rows), len(events)) for i in range(NUM_USERS): # for each user, we expect the PL event row, followed by state rows for # the PL event and each of the states that got reverted. stream_name, token, row = received_rows.pop(0) self.assertEqual("events", stream_name) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "ev") self.assertIsInstance(row.data, EventsStreamEventRow) self.assertEqual(row.data.event_id, pl_events[i].event_id) # the state rows are unsorted state_rows: List[EventsStreamCurrentStateRow] = [] for _ in range(STATES_PER_USER + 1): stream_name, token, row = received_rows.pop(0) self.assertEqual("events", stream_name) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "state") self.assertIsInstance(row.data, EventsStreamCurrentStateRow) state_rows.append(row.data) state_rows.sort(key=lambda r: r.state_key) sr = state_rows.pop(0) self.assertEqual(sr.type, EventTypes.PowerLevels) self.assertEqual(sr.event_id, pl_events[i].event_id) for sr in state_rows: self.assertEqual(sr.type, "test_state_event") # "None" indicates the state has been deleted self.assertIsNone(sr.event_id) self.assertEqual([], received_rows)
def test_update_function_huge_state_change(self): """Test replication with many state events Ensures that all events are correctly replicated when there are lots of state change rows to be replicated. """ # we want to generate lots of state changes at a single stream ID. # # We do this by having two branches in the DAG. On one, we have a moderator # which that generates lots of state; on the other, we de-op the moderator, # thus invalidating all the state. OTHER_USER = "******" # have the user join self.get_success( inject_member_event(self.hs, self.room_id, OTHER_USER, Membership.JOIN)) # Update existing power levels with mod at PL50 pls = self.helper.get_state(self.room_id, EventTypes.PowerLevels, tok=self.user_tok) pls["users"][OTHER_USER] = 50 self.helper.send_state( self.room_id, EventTypes.PowerLevels, pls, tok=self.user_tok, ) # this is the point in the DAG where we make a fork fork_point: List[str] = self.get_success( self.hs.get_datastores().main.get_latest_event_ids_in_room( self.room_id)) events = [ self._inject_state_event(sender=OTHER_USER) for _ in range(_STREAM_UPDATE_TARGET_ROW_COUNT) ] self.replicate() # all those events and state changes should have landed self.assertGreaterEqual(len(self.test_handler.received_rdata_rows), 2 * len(events)) # disconnect, so that we can stack up the changes self.disconnect() self.test_handler.received_rdata_rows.clear() # a state event which doesn't get rolled back, to check that the state # before the huge update comes through ok state1 = self._inject_state_event() # roll back all the state by de-modding the user prev_events = fork_point pls["users"][OTHER_USER] = 0 pl_event = self.get_success( inject_event( self.hs, prev_event_ids=prev_events, type=EventTypes.PowerLevels, state_key="", sender=self.user_id, room_id=self.room_id, content=pls, )) # one more bit of state that doesn't get rolled back state2 = self._inject_state_event() # check we're testing what we think we are: no rows should yet have been # received 
self.assertEqual([], self.test_handler.received_rdata_rows) # now reconnect to pull the updates self.reconnect() self.replicate() # we should have received all the expected rows in the right order (as # well as various cache invalidation updates which we ignore) # # we expect: # # - two rows for state1 # - the PL event row, plus state rows for the PL event and each # of the states that got reverted. # - two rows for state2 received_rows = [ row for row in self.test_handler.received_rdata_rows if row[0] == "events" ] # first check the first two rows, which should be state1 stream_name, token, row = received_rows.pop(0) self.assertEqual("events", stream_name) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "ev") self.assertIsInstance(row.data, EventsStreamEventRow) self.assertEqual(row.data.event_id, state1.event_id) stream_name, token, row = received_rows.pop(0) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "state") self.assertIsInstance(row.data, EventsStreamCurrentStateRow) self.assertEqual(row.data.event_id, state1.event_id) # now the last two rows, which should be state2 stream_name, token, row = received_rows.pop(-2) self.assertEqual("events", stream_name) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "ev") self.assertIsInstance(row.data, EventsStreamEventRow) self.assertEqual(row.data.event_id, state2.event_id) stream_name, token, row = received_rows.pop(-1) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "state") self.assertIsInstance(row.data, EventsStreamCurrentStateRow) self.assertEqual(row.data.event_id, state2.event_id) # that should leave us with the rows for the PL event self.assertEqual(len(received_rows), len(events) + 2) stream_name, token, row = received_rows.pop(0) self.assertEqual("events", stream_name) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "ev") self.assertIsInstance(row.data, EventsStreamEventRow) 
self.assertEqual(row.data.event_id, pl_event.event_id) # the state rows are unsorted state_rows: List[EventsStreamCurrentStateRow] = [] for stream_name, _, row in received_rows: self.assertEqual("events", stream_name) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "state") self.assertIsInstance(row.data, EventsStreamCurrentStateRow) state_rows.append(row.data) state_rows.sort(key=lambda r: r.state_key) sr = state_rows.pop(0) self.assertEqual(sr.type, EventTypes.PowerLevels) self.assertEqual(sr.event_id, pl_event.event_id) for sr in state_rows: self.assertEqual(sr.type, "test_state_event") # "None" indicates the state has been deleted self.assertIsNone(sr.event_id)