def inject_room_member(self, room: str, user: str, membership: Membership) -> None:
    """
    Inject a membership event into a room.

    Deprecated: use event_injection.inject_room_member directly

    Args:
        room: Room ID to inject the event into.
        user: MXID of the user to inject the membership for.
        membership: The membership type.
    """
    # inject_member_event is a coroutine: it must be driven to completion,
    # otherwise the membership event is never actually created. (Every other
    # call site in this file wraps it in get_success.)
    self.get_success(
        event_injection.inject_member_event(self.hs, room, user, membership)
    )
def test_catch_up_destination_rooms_tracking(self):
    """
    Tests that we populate the `destination_rooms` table as needed.
    """
    self.register_user("u1", "you the one")
    u1_token = self.login("u1", "you the one")
    room = self.helper.create_room_as("u1", tok=u1_token)

    # get a remote user into the room so that events federate to host2
    self.get_success(
        event_injection.inject_member_event(self.hs, room, "@user:host2", "join")
    )

    # send two events and snapshot the destination_rooms row after each
    event_id_1 = self.helper.send(room, "wombats!", tok=u1_token)["event_id"]
    row_1 = self.get_destination_room(room)

    event_id_2 = self.helper.send(room, "rabbits!", tok=u1_token)["event_id"]
    row_2 = self.get_destination_room(room)

    # check: events correctly registered in order
    self.assertEqual(row_1["event_id"], event_id_1)
    self.assertEqual(row_2["event_id"], event_id_2)
    self.assertEqual(row_1["stream_ordering"], row_2["stream_ordering"] - 1)
def test_not_latest_event(self):
    """Test that we send the latest event in the room even if its not ours."""
    per_dest_queue, sent_pdus = self.make_fake_destination_queue()

    # Make a room with a local user, and two servers. One will go offline
    # and one will send some events.
    self.register_user("u1", "you the one")
    u1_token = self.login("u1", "you the one")
    room_1 = self.helper.create_room_as("u1", tok=u1_token)

    self.get_success(
        event_injection.inject_member_event(self.hs, room_1, "@user:host2", "join")
    )
    event_1 = self.get_success(
        event_injection.inject_member_event(self.hs, room_1, "@user:host3", "join")
    )

    # First we send something from the local server, so that we notice the
    # remote is down and go into catchup mode.
    self.helper.send(room_1, "you hear me!!", tok=u1_token)

    # Now simulate us receiving an event from the still online remote.
    event_2 = self.get_success(
        event_injection.inject_event(
            self.hs,
            type=EventTypes.Message,
            sender="@user:host3",
            room_id=room_1,
            content={"msgtype": "m.text", "body": "Hello"},
        )
    )

    # pretend host2 had already successfully received everything up to and
    # including event_1 (host3's join), so catch-up starts from there
    self.get_success(
        self.hs.get_datastore().set_destination_last_successful_stream_ordering(
            "host2", event_1.internal_metadata.stream_ordering
        )
    )

    self.get_success(per_dest_queue._catch_up_transmission_loop())

    # We expect only the last message from the remote, event_2, to have been
    # sent, rather than the last *local* event that was sent.
    self.assertEqual(len(sent_pdus), 1)
    self.assertEqual(sent_pdus[0].event_id, event_2.event_id)
    self.assertFalse(per_dest_queue._catching_up)
def test_catch_up_last_successful_stream_ordering_tracking(self):
    """
    Tests that a destination's `last_successful_stream_ordering` stays unset
    while the destination is unreachable, and is updated to match the
    `destination_rooms` entry once a send succeeds.
    """
    self.register_user("u1", "you the one")
    u1_token = self.login("u1", "you the one")
    room = self.helper.create_room_as("u1", tok=u1_token)

    # take the remote offline
    self.is_online = False

    self.get_success(
        event_injection.inject_member_event(self.hs, room, "@user:host2", "join")
    )

    self.helper.send(room, "wombats!", tok=u1_token)
    self.pump()

    lsso_1 = self.get_success(
        self.hs.get_datastores().main.get_destination_last_successful_stream_ordering(
            "host2"
        )
    )

    self.assertIsNone(
        lsso_1,
        "There should be no last successful stream ordering for an always-offline destination",
    )

    # bring the remote online
    self.is_online = True

    event_id_2 = self.helper.send(room, "rabbits!", tok=u1_token)["event_id"]

    lsso_2 = self.get_success(
        self.hs.get_datastores().main.get_destination_last_successful_stream_ordering(
            "host2"
        )
    )
    row_2 = self.get_destination_room(room)

    # sanity-check the test machinery itself before the real assertion
    self.assertEqual(
        self.pdus[0]["content"]["body"],
        "rabbits!",
        "Test fault: didn't receive the right PDU",
    )
    self.assertEqual(
        row_2["event_id"],
        event_id_2,
        "Test fault: destination_rooms not updated correctly",
    )
    self.assertEqual(
        lsso_2,
        row_2["stream_ordering"],
        "Send succeeded but not marked as last_successful_stream_ordering",
    )
def _create_rooms_and_inject_memberships(
    self, creator: str, token: str, joiner: str
) -> Tuple[str, str]:
    """Create one public and one private room on behalf of `creator`, then
    force `joiner` into both via injected membership events.

    Returns:
        A (public_room_id, private_room_id) pair.
    """
    pub_room = self.helper.create_room_as(
        creator,
        is_public=True,
        # See https://github.com/matrix-org/synapse/issues/10951
        extra_content={"visibility": "public"},
        tok=token,
    )
    priv_room = self.helper.create_room_as(creator, is_public=False, tok=token)

    # HACK: bypass the usual join dance and drop membership events straight in
    for target_room in (pub_room, priv_room):
        self.get_success(inject_member_event(self.hs, target_room, joiner, "join"))

    return pub_room, priv_room
def test_get_joined_users_from_context(self):
    """`get_joined_users_from_context` should return the joined members at an
    event, and should not be confused by a non-membership state event whose
    state_key happens to be a user ID (regression test for #7376).
    """
    room = self.helper.create_room_as(self.u_alice, tok=self.t_alice)
    bob_event = self.get_success(
        event_injection.inject_member_event(
            self.hs, room, self.u_bob, Membership.JOIN
        )
    )

    # first, create a regular event
    event, context = self.get_success(
        event_injection.create_event(
            self.hs,
            room_id=room,
            sender=self.u_alice,
            prev_event_ids=[bob_event.event_id],
            type="m.test.1",
            content={},
        )
    )

    users = self.get_success(
        self.store.get_joined_users_from_context(event, context)
    )
    self.assertEqual(users.keys(), {self.u_alice, self.u_bob})

    # Regression test for #7376: create a state event whose key matches bob's
    # user_id, but which is *not* a membership event, and persist that; then check
    # that `get_joined_users_from_context` returns the correct users for the next event.
    non_member_event = self.get_success(
        event_injection.inject_event(
            self.hs,
            room_id=room,
            sender=self.u_bob,
            prev_event_ids=[bob_event.event_id],
            type="m.test.2",
            state_key=self.u_bob,
            content={},
        )
    )
    event, context = self.get_success(
        event_injection.create_event(
            self.hs,
            room_id=room,
            sender=self.u_alice,
            prev_event_ids=[non_member_event.event_id],
            type="m.test.3",
            content={},
        )
    )
    users = self.get_success(
        self.store.get_joined_users_from_context(event, context)
    )
    self.assertEqual(users.keys(), {self.u_alice, self.u_bob})
def test_joining_private_room_with_excluded_user(self) -> None: """ When a user excluded from the user directory, E say, joins a private room, E will not appear in the `users_who_share_private_rooms` table. When a normal user, U say, joins a private room containing E, then U will appear in the `users_who_share_private_rooms` table, but E will not. """ # Setup a support and two normal users. alice = self.register_user("alice", "pass") alice_token = self.login(alice, "pass") bob = self.register_user("bob", "pass") bob_token = self.login(bob, "pass") support = "@support1:test" self.get_success( self.store.register_user( user_id=support, password_hash=None, user_type=UserTypes.SUPPORT ) ) # Alice makes a room. Inject the support user into the room. room = self.helper.create_room_as(alice, is_public=False, tok=alice_token) self.get_success(inject_member_event(self.hs, room, support, "join")) # Check the DB state. The support user should not be in the directory. users, in_public, in_private = self.get_success( self.user_dir_helper.get_tables() ) self.assertEqual(users, {alice, bob}) self.assertEqual(in_public, set()) self.assertEqual(in_private, set()) # Then invite Bob, who accepts. self.helper.invite(room, alice, bob, tok=alice_token) self.helper.join(room, bob, tok=bob_token) # Check the DB state. The support user should not be in the directory. users, in_public, in_private = self.get_success( self.user_dir_helper.get_tables() ) self.assertEqual(users, {alice, bob}) self.assertEqual(in_public, set()) self.assertEqual(in_private, {(alice, bob, room), (bob, alice, room)})
def test_user_not_in_users_table(self) -> None:
    """Unclear how it happens, but on matrix.org we've seen join events for users
    who aren't in the users table. Test that we don't fall over when processing
    such a user.
    """
    first_user = self.register_user("user1", "pass")
    first_token = self.login(first_user, "pass")
    room_id = self.helper.create_room_as(
        first_user, is_public=True, tok=first_token
    )

    # Drop in a join event for a user who was never registered.
    self.get_success(
        inject_member_event(self.hs, room_id, "@not-a-user:test", "join")
    )

    # A second, genuine user registers and joins the room.
    second_user = self.register_user("user2", "pass")
    second_token = self.login(second_user, "pass")
    self.helper.join(room_id, second_user, tok=second_token)

    # The dodgy event should not have stopped us from processing the second
    # user's join: both real users show up in the public-rooms table.
    public_entries = self.get_success(
        self.user_dir_helper.get_users_in_public_rooms()
    )
    self.assertEqual(
        set(public_entries), {(first_user, room_id), (second_user, room_id)}
    )
def test_backfill_with_many_backward_extremities(self) -> None:
    """
    Check that we can backfill with many backward extremities.
    The goal is to make sure that when we only use a portion of backwards
    extremities(the magic number is more than 5), no errors are thrown.

    Regression test, see #11027
    """
    # create the room
    user_id = self.register_user("kermit", "test")
    tok = self.login("kermit", "test")
    room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
    room_version = self.get_success(self.store.get_room_version(room_id))

    # we need a user on the remote server to be a member, so that we can send
    # extremity-causing events.
    remote_server_user_id = f"@user:{self.OTHER_SERVER_NAME}"
    self.get_success(
        event_injection.inject_member_event(
            self.hs, room_id, remote_server_user_id, "join"
        )
    )

    send_result = self.helper.send(room_id, "first message", tok=tok)
    ev1 = self.get_success(
        self.store.get_event(send_result["event_id"], allow_none=False)
    )
    current_state = self.get_success(
        self.store.get_events_as_list(
            (
                self.get_success(self.store.get_partial_current_state_ids(room_id))
            ).values()
        )
    )

    # Create "many" backward extremities. The magic number we're trying to
    # create more than is 5 which corresponds to the number of backward
    # extremities we slice off in `_maybe_backfill_inner`
    federation_event_handler = self.hs.get_federation_event_handler()
    # only the create event and the remote user's membership are needed to
    # auth the fake messages below
    auth_events = [
        ev
        for ev in current_state
        if (ev.type, ev.state_key)
        in {("m.room.create", ""), ("m.room.member", remote_server_user_id)}
    ]
    for _ in range(0, 8):
        event = make_event_from_dict(
            self.add_hashes_and_signatures(
                {
                    "origin_server_ts": 1,
                    "type": "m.room.message",
                    "content": {
                        "msgtype": "m.text",
                        "body": "message connected to fake event",
                    },
                    "room_id": room_id,
                    "sender": remote_server_user_id,
                    "prev_events": [
                        ev1.event_id,
                        # We're creating an backward extremity each time thanks
                        # to this fake event
                        generate_fake_event_id(),
                    ],
                    "auth_events": [ev.event_id for ev in auth_events],
                    "depth": ev1.depth + 1,
                },
                room_version,
            ),
            room_version,
        )

        # we poke this directly into _process_received_pdu, to avoid the
        # federation handler wanting to backfill the fake event.
        self.get_success(
            federation_event_handler._process_received_pdu(
                self.OTHER_SERVER_NAME,
                event,
                state_ids={
                    (e.type, e.state_key): e.event_id for e in current_state
                },
            )
        )

    # we should now have 8 backwards extremities.
    backwards_extremities = self.get_success(
        self.store.db_pool.simple_select_list(
            "event_backward_extremities",
            keyvalues={"room_id": room_id},
            retcols=["event_id"],
        )
    )
    self.assertEqual(len(backwards_extremities), 8)

    current_depth = 1
    limit = 100
    with LoggingContext("receive_pdu"):
        # Make sure backfill still works
        d = run_in_background(
            self.hs.get_federation_handler().maybe_backfill,
            room_id,
            current_depth,
            limit,
        )
    self.get_success(d)
def test_catch_up_on_synapse_startup(self):
    """
    Tests the behaviour of get_catch_up_outstanding_destinations and
    _wake_destinations_needing_catchup.
    """

    # list of sorted server names (note that there are more servers than the batch
    # size used in get_catch_up_outstanding_destinations).
    server_names = ["server%02d" % number for number in range(42)] + ["zzzerver"]

    # ARRANGE:
    #  - a local user (u1)
    #  - a room which u1 is joined to (and remote users @user:serverXX are
    #    joined to)

    # mark the remotes as online
    self.is_online = True

    self.register_user("u1", "you the one")
    u1_token = self.login("u1", "you the one")
    room_id = self.helper.create_room_as("u1", tok=u1_token)

    for server_name in server_names:
        self.get_success(
            event_injection.inject_member_event(
                self.hs, room_id, "@user:%s" % server_name, "join"
            )
        )

    # create an event
    self.helper.send(room_id, "deary me!", tok=u1_token)

    # ASSERT:
    # - All servers are up to date so none should have outstanding catch-up
    outstanding_when_successful = self.get_success(
        self.hs.get_datastore().get_catch_up_outstanding_destinations(None)
    )
    self.assertEqual(outstanding_when_successful, [])

    # ACT:
    # - Make the remote servers unreachable
    self.is_online = False

    # - Mark zzzerver as being backed-off from
    now = self.clock.time_msec()
    self.get_success(
        self.hs.get_datastore().set_destination_retry_timings(
            "zzzerver", now, now, 24 * 60 * 60 * 1000  # retry in 1 day
        )
    )

    # - Send an event
    self.helper.send(room_id, "can anyone hear me?", tok=u1_token)

    # ASSERT (get_catch_up_outstanding_destinations):
    # - all remotes are outstanding
    # - they are returned in batches of 25, in order
    outstanding_1 = self.get_success(
        self.hs.get_datastore().get_catch_up_outstanding_destinations(None)
    )
    self.assertEqual(len(outstanding_1), 25)
    self.assertEqual(outstanding_1, server_names[0:25])

    # the second batch is keyed off the last destination of the first batch
    outstanding_2 = self.get_success(
        self.hs.get_datastore().get_catch_up_outstanding_destinations(
            outstanding_1[-1]
        )
    )
    self.assertNotIn("zzzerver", outstanding_2)
    self.assertEqual(len(outstanding_2), 17)
    self.assertEqual(outstanding_2, server_names[25:-1])

    # ACT: call _wake_destinations_needing_catchup

    # patch wake_destination to just count the destinations instead
    woken = []

    def wake_destination_track(destination):
        woken.append(destination)

    self.hs.get_federation_sender().wake_destination = wake_destination_track

    # cancel the pre-existing timer for _wake_destinations_needing_catchup
    # this is because we are calling it manually rather than waiting for it
    # to be called automatically
    self.hs.get_federation_sender()._catchup_after_startup_timer.cancel()

    self.get_success(
        self.hs.get_federation_sender()._wake_destinations_needing_catchup(),
        by=5.0,
    )

    # ASSERT (_wake_destinations_needing_catchup):
    # - all remotes are woken up, save for zzzerver
    self.assertNotIn("zzzerver", woken)
    # - all destinations are woken exactly once; they appear once in woken.
    self.assertCountEqual(woken, server_names[:-1])
def test_catch_up_loop(self):
    """
    Tests the behaviour of _catch_up_transmission_loop.
    """

    # ARRANGE:
    #  - a local user (u1)
    #  - 3 rooms which u1 is joined to (and remote user @user:host2 is
    #    joined to)
    #  - some events (1 to 5) in those rooms
    #    we have 'already sent' events 1 and 2 to host2
    per_dest_queue, sent_pdus = self.make_fake_destination_queue()

    self.register_user("u1", "you the one")
    u1_token = self.login("u1", "you the one")
    room_1 = self.helper.create_room_as("u1", tok=u1_token)
    room_2 = self.helper.create_room_as("u1", tok=u1_token)
    room_3 = self.helper.create_room_as("u1", tok=u1_token)
    self.get_success(
        event_injection.inject_member_event(self.hs, room_1, "@user:host2", "join")
    )
    self.get_success(
        event_injection.inject_member_event(self.hs, room_2, "@user:host2", "join")
    )
    self.get_success(
        event_injection.inject_member_event(self.hs, room_3, "@user:host2", "join")
    )

    # create some events
    self.helper.send(room_1, "you hear me!!", tok=u1_token)
    event_id_2 = self.helper.send(room_2, "wombats!", tok=u1_token)["event_id"]
    self.helper.send(room_3, "Matrix!", tok=u1_token)
    event_id_4 = self.helper.send(room_2, "rabbits!", tok=u1_token)["event_id"]
    event_id_5 = self.helper.send(room_3, "Synapse!", tok=u1_token)["event_id"]

    # destination_rooms should already be populated, but let us pretend that we already
    # sent (successfully) up to and including event id 2
    event_2 = self.get_success(self.hs.get_datastore().get_event(event_id_2))

    # also fetch event 5 so we know its last_successful_stream_ordering later
    event_5 = self.get_success(self.hs.get_datastore().get_event(event_id_5))

    self.get_success(
        self.hs.get_datastore().set_destination_last_successful_stream_ordering(
            "host2", event_2.internal_metadata.stream_ordering
        )
    )

    # ACT
    self.get_success(per_dest_queue._catch_up_transmission_loop())

    # ASSERT, noticing in particular:
    # - event 3 not sent out, because event 5 replaces it
    # - order is least recent first, so event 5 comes after event 4
    # - catch-up is completed
    self.assertEqual(len(sent_pdus), 2)
    self.assertEqual(sent_pdus[0].event_id, event_id_4)
    self.assertEqual(sent_pdus[1].event_id, event_id_5)
    self.assertFalse(per_dest_queue._catching_up)
    self.assertEqual(
        per_dest_queue._last_successful_stream_ordering,
        event_5.internal_metadata.stream_ordering,
    )
def test_catch_up_from_blank_state(self):
    """
    Runs an overall test of federation catch-up from scratch.
    Further tests will focus on more narrow aspects and edge-cases, but I
    hope to provide an overall view with this test.
    """
    # bring the other server online
    self.is_online = True

    # let's make some events for the other server to receive
    self.register_user("u1", "you the one")
    u1_token = self.login("u1", "you the one")
    room_1 = self.helper.create_room_as("u1", tok=u1_token)
    room_2 = self.helper.create_room_as("u1", tok=u1_token)

    # also critical to federate
    self.get_success(
        event_injection.inject_member_event(self.hs, room_1, "@user:host2", "join")
    )
    self.get_success(
        event_injection.inject_member_event(self.hs, room_2, "@user:host2", "join")
    )

    self.helper.send_state(
        room_1, event_type="m.room.topic", body={"topic": "wombat"}, tok=u1_token
    )

    # check: PDU received for topic event
    self.assertEqual(len(self.pdus), 1)
    self.assertEqual(self.pdus[0]["type"], "m.room.topic")

    # take the remote offline
    self.is_online = False

    # send another event
    self.helper.send(room_1, "hi user!", tok=u1_token)

    # check: things didn't go well since the remote is down
    self.assertEqual(len(self.failed_pdus), 1)
    self.assertEqual(self.failed_pdus[0]["content"]["body"], "hi user!")

    # let's delete the federation transmission queue
    # (this pretends we are starting up fresh.)
    self.assertFalse(
        self.hs.get_federation_sender()
        ._per_destination_queues["host2"]
        .transmission_loop_running
    )
    del self.hs.get_federation_sender()._per_destination_queues["host2"]

    # let's also clear any backoffs
    self.get_success(
        self.hs.get_datastore().set_destination_retry_timings("host2", None, 0, 0)
    )

    # bring the remote online and clear the received pdu list
    self.is_online = True
    self.pdus = []

    # now we need to initiate a federation transaction somehow…
    # to do that, let's send another event (because it's simple to do)
    # (do it to another room otherwise the catch-up logic decides it doesn't
    # need to catch up room_1 — something I overlooked when first writing
    # this test)
    self.helper.send(room_2, "wombats!", tok=u1_token)

    # we should now have received both PDUs
    self.assertEqual(len(self.pdus), 2)
    self.assertEqual(self.pdus[0]["content"]["body"], "hi user!")
    self.assertEqual(self.pdus[1]["content"]["body"], "wombats!")
def _test_process_pulled_event_with_missing_state(
    self, prev_exists_as_outlier: bool
) -> None:
    """Common flow for testing _process_pulled_event when the pulled event's
    prev_event is missing, so the state at the prev_event has to be fetched
    over federation.

    Args:
        prev_exists_as_outlier: if True, persist the prev event up front as an
            outlier; otherwise stub the federation transport client so the
            server has to fetch it via /event.
    """
    OTHER_USER = f"@user:{self.OTHER_SERVER_NAME}"
    main_store = self.hs.get_datastores().main
    state_storage_controller = self.hs.get_storage_controllers().state

    # create the room
    user_id = self.register_user("kermit", "test")
    tok = self.login("kermit", "test")
    room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
    room_version = self.get_success(main_store.get_room_version(room_id))

    # allow the remote user to send state events
    self.helper.send_state(
        room_id,
        "m.room.power_levels",
        {"events_default": 0, "state_default": 0},
        tok=tok,
    )

    # add the remote user to the room
    member_event = self.get_success(
        event_injection.inject_member_event(self.hs, room_id, OTHER_USER, "join")
    )

    initial_state_map = self.get_success(
        main_store.get_partial_current_state_ids(room_id)
    )

    auth_event_ids = [
        initial_state_map[("m.room.create", "")],
        initial_state_map[("m.room.power_levels", "")],
        member_event.event_id,
    ]

    # mock up a load of state events which we are missing
    state_events = [
        make_event_from_dict(
            self.add_hashes_and_signatures(
                {
                    "type": "test_state_type",
                    "state_key": f"state_{i}",
                    "room_id": room_id,
                    "sender": OTHER_USER,
                    "prev_events": [member_event.event_id],
                    "auth_events": auth_event_ids,
                    "origin_server_ts": 1,
                    "depth": 10,
                    "content": {"body": f"state_{i}"},
                }
            ),
            room_version,
        )
        for i in range(1, 10)
    ]

    # this is the state that we are going to claim is active at the prev_event.
    state_at_prev_event = state_events + self.get_success(
        main_store.get_events_as_list(initial_state_map.values())
    )

    # mock up a prev event.
    # Depending on the test, we either persist this upfront (as an outlier),
    # or let the server request it.
    prev_event = make_event_from_dict(
        self.add_hashes_and_signatures(
            {
                "type": "test_regular_type",
                "room_id": room_id,
                "sender": OTHER_USER,
                "prev_events": [],
                "auth_events": auth_event_ids,
                "origin_server_ts": 1,
                "depth": 11,
                "content": {"body": "missing_prev"},
            }
        ),
        room_version,
    )
    if prev_exists_as_outlier:
        prev_event.internal_metadata.outlier = True
        persistence = self.hs.get_storage_controllers().persistence
        self.get_success(
            persistence.persist_event(
                prev_event,
                EventContext.for_outlier(self.hs.get_storage_controllers()),
            )
        )
    else:

        async def get_event(destination: str, event_id: str, timeout=None):
            # the server should only ask the other server for the prev event
            self.assertEqual(destination, self.OTHER_SERVER_NAME)
            self.assertEqual(event_id, prev_event.event_id)
            return {"pdus": [prev_event.get_pdu_json()]}

        self.mock_federation_transport_client.get_event.side_effect = get_event

    # mock up a regular event to pass into _process_pulled_event
    pulled_event = make_event_from_dict(
        self.add_hashes_and_signatures(
            {
                "type": "test_regular_type",
                "room_id": room_id,
                "sender": OTHER_USER,
                "prev_events": [prev_event.event_id],
                "auth_events": auth_event_ids,
                "origin_server_ts": 1,
                "depth": 12,
                "content": {"body": "pulled"},
            }
        ),
        room_version,
    )

    # we expect an outbound request to /state_ids, so stub that out
    self.mock_federation_transport_client.get_room_state_ids.return_value = (
        make_awaitable(
            {
                "pdu_ids": [e.event_id for e in state_at_prev_event],
                "auth_chain_ids": [],
            }
        )
    )

    # we also expect an outbound request to /state
    self.mock_federation_transport_client.get_room_state.return_value = (
        make_awaitable(
            StateRequestResponse(auth_events=[], state=state_at_prev_event)
        )
    )

    # we have to bump the clock a bit, to keep the retry logic in
    # FederationClient.get_pdu happy
    self.reactor.advance(60000)

    # Finally, the call under test: send the pulled event into _process_pulled_event
    with LoggingContext("test"):
        self.get_success(
            self.hs.get_federation_event_handler()._process_pulled_event(
                self.OTHER_SERVER_NAME, pulled_event, backfilled=False
            )
        )

    # check that the event is correctly persisted
    persisted = self.get_success(main_store.get_event(pulled_event.event_id))
    self.assertIsNotNone(persisted, "pulled event was not persisted at all")
    self.assertFalse(
        persisted.internal_metadata.is_outlier(), "pulled event was an outlier"
    )

    # check that the state at that event is as expected
    state = self.get_success(
        state_storage_controller.get_state_ids_for_event(pulled_event.event_id)
    )
    expected_state = {
        (e.type, e.state_key): e.event_id for e in state_at_prev_event
    }
    self.assertEqual(state, expected_state)

    if prev_exists_as_outlier:
        self.mock_federation_transport_client.get_event.assert_not_called()
def test_send_local_online_presence_to_federation(self):
    """Tests that send_local_presence_to_users sends local online presence to remote users."""
    # Create a user who will send presence updates
    self.presence_sender_id = self.register_user("presence_sender", "monkey")
    self.presence_sender_tok = self.login("presence_sender", "monkey")

    # And a room they're a part of
    room_id = self.helper.create_room_as(
        self.presence_sender_id,
        tok=self.presence_sender_tok,
    )

    # Mark them as online
    send_presence_update(
        self,
        self.presence_sender_id,
        self.presence_sender_tok,
        "online",
        "I'm online!",
    )

    # Make up a remote user to send presence to
    remote_user_id = "@far_away_person:island"

    # Create a join membership event for the remote user into the room.
    # This allows presence information to flow from one user to the other.
    self.get_success(
        inject_member_event(
            self.hs,
            room_id,
            sender=remote_user_id,
            target=remote_user_id,
            membership="join",
        )
    )

    # The remote user would have received the existing room members' presence
    # when they joined the room.
    #
    # Thus we reset the mock, and try sending online local user
    # presence again
    self.hs.get_federation_transport_client().send_transaction.reset_mock()

    # Broadcast local user online presence
    self.get_success(
        self.module_api.send_local_online_presence_to([remote_user_id])
    )

    # Check that a presence update was sent as part of a federation transaction
    found_update = False
    calls = (
        self.hs.get_federation_transport_client().send_transaction.call_args_list
    )
    for call in calls:
        call_args = call[0]
        federation_transaction = call_args[0]  # type: Transaction

        # Get the sent EDUs in this transaction
        edus = federation_transaction.get_dict()["edus"]

        for edu in edus:
            # Make sure we're only checking presence-type EDUs
            if edu["edu_type"] != EduTypes.Presence:
                continue

            # EDUs can contain multiple presence updates
            for presence_update in edu["content"]["push"]:
                if presence_update["user_id"] == self.presence_sender_id:
                    found_update = True

    self.assertTrue(found_update)
def test_update_function_state_row_limit(self): """Test replication with many state events over several stream ids.""" # we want to generate lots of state changes, but for this test, we want to # spread out the state changes over a few stream IDs. # # We do this by having two branches in the DAG. On one, we have four moderators, # each of which that generates lots of state; on the other, we de-op the users, # thus invalidating all the state. NUM_USERS = 4 STATES_PER_USER = _STREAM_UPDATE_TARGET_ROW_COUNT // 4 + 1 user_ids = ["@user%i:localhost" % (i, ) for i in range(NUM_USERS)] # have the users join for u in user_ids: self.get_success( inject_member_event(self.hs, self.room_id, u, Membership.JOIN)) # Update existing power levels with mod at PL50 pls = self.helper.get_state(self.room_id, EventTypes.PowerLevels, tok=self.user_tok) pls["users"].update({u: 50 for u in user_ids}) self.helper.send_state( self.room_id, EventTypes.PowerLevels, pls, tok=self.user_tok, ) # this is the point in the DAG where we make a fork fork_point: List[str] = self.get_success( self.hs.get_datastores().main.get_latest_event_ids_in_room( self.room_id)) events: List[EventBase] = [] for user in user_ids: events.extend( self._inject_state_event(sender=user) for _ in range(STATES_PER_USER)) self.replicate() # all those events and state changes should have landed self.assertGreaterEqual(len(self.test_handler.received_rdata_rows), 2 * len(events)) # disconnect, so that we can stack up the changes self.disconnect() self.test_handler.received_rdata_rows.clear() # now roll back all that state by de-modding the users prev_events = fork_point pl_events = [] for u in user_ids: pls["users"][u] = 0 e = self.get_success( inject_event( self.hs, prev_event_ids=prev_events, type=EventTypes.PowerLevels, state_key="", sender=self.user_id, room_id=self.room_id, content=pls, )) prev_events = [e.event_id] pl_events.append(e) # check we're testing what we think we are: no rows should yet have been # received 
self.assertEqual([], self.test_handler.received_rdata_rows) # now reconnect to pull the updates self.reconnect() self.replicate() # we should have received all the expected rows in the right order (as # well as various cache invalidation updates which we ignore) received_rows = [ row for row in self.test_handler.received_rdata_rows if row[0] == "events" ] self.assertGreaterEqual(len(received_rows), len(events)) for i in range(NUM_USERS): # for each user, we expect the PL event row, followed by state rows for # the PL event and each of the states that got reverted. stream_name, token, row = received_rows.pop(0) self.assertEqual("events", stream_name) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "ev") self.assertIsInstance(row.data, EventsStreamEventRow) self.assertEqual(row.data.event_id, pl_events[i].event_id) # the state rows are unsorted state_rows: List[EventsStreamCurrentStateRow] = [] for _ in range(STATES_PER_USER + 1): stream_name, token, row = received_rows.pop(0) self.assertEqual("events", stream_name) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "state") self.assertIsInstance(row.data, EventsStreamCurrentStateRow) state_rows.append(row.data) state_rows.sort(key=lambda r: r.state_key) sr = state_rows.pop(0) self.assertEqual(sr.type, EventTypes.PowerLevels) self.assertEqual(sr.event_id, pl_events[i].event_id) for sr in state_rows: self.assertEqual(sr.type, "test_state_event") # "None" indicates the state has been deleted self.assertIsNone(sr.event_id) self.assertEqual([], received_rows)
def test_update_function_huge_state_change(self): """Test replication with many state events Ensures that all events are correctly replicated when there are lots of state change rows to be replicated. """ # we want to generate lots of state changes at a single stream ID. # # We do this by having two branches in the DAG. On one, we have a moderator # which that generates lots of state; on the other, we de-op the moderator, # thus invalidating all the state. OTHER_USER = "******" # have the user join self.get_success( inject_member_event(self.hs, self.room_id, OTHER_USER, Membership.JOIN)) # Update existing power levels with mod at PL50 pls = self.helper.get_state(self.room_id, EventTypes.PowerLevels, tok=self.user_tok) pls["users"][OTHER_USER] = 50 self.helper.send_state( self.room_id, EventTypes.PowerLevels, pls, tok=self.user_tok, ) # this is the point in the DAG where we make a fork fork_point: List[str] = self.get_success( self.hs.get_datastores().main.get_latest_event_ids_in_room( self.room_id)) events = [ self._inject_state_event(sender=OTHER_USER) for _ in range(_STREAM_UPDATE_TARGET_ROW_COUNT) ] self.replicate() # all those events and state changes should have landed self.assertGreaterEqual(len(self.test_handler.received_rdata_rows), 2 * len(events)) # disconnect, so that we can stack up the changes self.disconnect() self.test_handler.received_rdata_rows.clear() # a state event which doesn't get rolled back, to check that the state # before the huge update comes through ok state1 = self._inject_state_event() # roll back all the state by de-modding the user prev_events = fork_point pls["users"][OTHER_USER] = 0 pl_event = self.get_success( inject_event( self.hs, prev_event_ids=prev_events, type=EventTypes.PowerLevels, state_key="", sender=self.user_id, room_id=self.room_id, content=pls, )) # one more bit of state that doesn't get rolled back state2 = self._inject_state_event() # check we're testing what we think we are: no rows should yet have been # received 
self.assertEqual([], self.test_handler.received_rdata_rows) # now reconnect to pull the updates self.reconnect() self.replicate() # we should have received all the expected rows in the right order (as # well as various cache invalidation updates which we ignore) # # we expect: # # - two rows for state1 # - the PL event row, plus state rows for the PL event and each # of the states that got reverted. # - two rows for state2 received_rows = [ row for row in self.test_handler.received_rdata_rows if row[0] == "events" ] # first check the first two rows, which should be state1 stream_name, token, row = received_rows.pop(0) self.assertEqual("events", stream_name) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "ev") self.assertIsInstance(row.data, EventsStreamEventRow) self.assertEqual(row.data.event_id, state1.event_id) stream_name, token, row = received_rows.pop(0) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "state") self.assertIsInstance(row.data, EventsStreamCurrentStateRow) self.assertEqual(row.data.event_id, state1.event_id) # now the last two rows, which should be state2 stream_name, token, row = received_rows.pop(-2) self.assertEqual("events", stream_name) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "ev") self.assertIsInstance(row.data, EventsStreamEventRow) self.assertEqual(row.data.event_id, state2.event_id) stream_name, token, row = received_rows.pop(-1) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "state") self.assertIsInstance(row.data, EventsStreamCurrentStateRow) self.assertEqual(row.data.event_id, state2.event_id) # that should leave us with the rows for the PL event self.assertEqual(len(received_rows), len(events) + 2) stream_name, token, row = received_rows.pop(0) self.assertEqual("events", stream_name) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "ev") self.assertIsInstance(row.data, EventsStreamEventRow) 
self.assertEqual(row.data.event_id, pl_event.event_id) # the state rows are unsorted state_rows: List[EventsStreamCurrentStateRow] = [] for stream_name, _, row in received_rows: self.assertEqual("events", stream_name) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "state") self.assertIsInstance(row.data, EventsStreamCurrentStateRow) state_rows.append(row.data) state_rows.sort(key=lambda r: r.state_key) sr = state_rows.pop(0) self.assertEqual(sr.type, EventTypes.PowerLevels) self.assertEqual(sr.event_id, pl_event.event_id) for sr in state_rows: self.assertEqual(sr.type, "test_state_event") # "None" indicates the state has been deleted self.assertIsNone(sr.event_id)