def test_prune_internal_data(self) -> None:
    """Verify that event_queue.contents() strips the server-internal
    "internal_data" key from events by default, and keeps it when
    include_internal_data=True is passed.
    """
    user_profile = self.example_user("hamlet")
    queue_data = dict(
        all_public_streams=True,
        apply_markdown=True,
        client_gravatar=True,
        client_type_name="website",
        event_types=["message"],
        last_connection_time=time.time(),
        queue_timeout=600,
        realm_id=user_profile.realm.id,
        user_profile_id=user_profile.id,
    )
    client = allocate_client_descriptor(queue_data)
    self.assertTrue(client.event_queue.empty())

    # Generate three message events of different flavors (personal
    # mention, wildcard mention, direct message) so that each event
    # carries internal_data on the server side.
    self.send_stream_message(
        self.example_user("iago"), "Denmark", content="@**King Hamlet** what's up?"
    )
    self.send_stream_message(
        self.example_user("iago"), "Denmark", content="@**all** what's up?"
    )
    self.send_personal_message(self.example_user("iago"), user_profile)

    events = client.event_queue.contents()
    self.assert_length(events, 3)
    # By default internal_data must be pruned from every event.
    for event in events:
        self.assertNotIn("internal_data", event)

    events = client.event_queue.contents(include_internal_data=True)
    for event in events:
        self.assertIn("internal_data", event)
def test_get_client_info_for_all_public_streams(self) -> None:
    """A queue registered with all_public_streams=True should be
    included for a stream message event even with an empty users list,
    and is_sender should track whether the event names our queue id.
    """
    hamlet = self.example_user("hamlet")
    realm = hamlet.realm
    queue_data = dict(
        all_public_streams=True,
        apply_markdown=True,
        client_gravatar=True,
        client_type_name="website",
        event_types=["message"],
        last_connection_time=time.time(),
        queue_timeout=0,
        realm_id=realm.id,
        user_profile_id=hamlet.id,
    )
    client = allocate_client_descriptor(queue_data)

    # No explicit users, no sender_queue_id: we are still matched via
    # all_public_streams, with empty flags and is_sender False.
    event = dict(
        realm_id=realm.id,
        stream_name="whatever",
    )
    client_info = get_client_info_for_message_event(
        event,
        users=[],
    )
    self.assert_length(client_info, 1)
    info = client_info[client.event_queue.id]
    self.assertEqual(info["client"].apply_markdown, True)
    self.assertEqual(info["client"].client_gravatar, True)
    self.assertEqual(info["client"].user_profile_id, hamlet.id)
    self.assertEqual(info["flags"], [])
    self.assertEqual(info["is_sender"], False)

    # When the event carries our own queue id as sender_queue_id,
    # is_sender flips to True.
    event = dict(
        realm_id=realm.id,
        stream_name="whatever",
        sender_queue_id=client.event_queue.id,
    )
    client_info = get_client_info_for_message_event(
        event,
        users=[],
    )
    info = client_info[client.event_queue.id]
    self.assertEqual(info["is_sender"], True)
def get_client_descriptor(self) -> ClientDescriptor:
    """Allocate and return a plain website client descriptor for hamlet
    (no markdown, no public-stream wildcard, all event types).
    """
    hamlet = self.example_user("hamlet")
    realm = hamlet.realm
    queue_data = dict(
        all_public_streams=False,
        apply_markdown=False,
        client_gravatar=True,
        client_type_name="website",
        event_types=None,
        last_connection_time=time.time(),
        queue_timeout=0,
        realm_id=realm.id,
        user_profile_id=hamlet.id,
    )
    return allocate_client_descriptor(queue_data)
def test_get_info(apply_markdown: bool, client_gravatar: bool) -> None:
    """Closure helper: register a fresh queue for hamlet with the given
    rendering settings and check which users' message events reach it.
    Relies on hamlet/cordelia/realm from the enclosing scope.
    """
    clear_client_event_queues_for_testing()

    queue_data = dict(
        all_public_streams=False,
        apply_markdown=apply_markdown,
        client_gravatar=client_gravatar,
        client_type_name="website",
        event_types=["message"],
        last_connection_time=time.time(),
        queue_timeout=0,
        realm_id=realm.id,
        user_profile_id=hamlet.id,
    )
    client = allocate_client_descriptor(queue_data)

    event = dict(
        realm_id=realm.id,
        stream_name="whatever",
    )

    # A message only for cordelia does not reach hamlet's queue.
    client_info = get_client_info_for_message_event(
        event,
        users=[
            dict(id=cordelia.id),
        ],
    )
    self.assert_length(client_info, 0)

    # Once hamlet is among the users, his queue is matched and his
    # flags are passed through.
    client_info = get_client_info_for_message_event(
        event,
        users=[
            dict(id=cordelia.id),
            dict(id=hamlet.id, flags=["mentioned"]),
        ],
    )
    self.assert_length(client_info, 1)

    info = client_info[client.event_queue.id]
    self.assertEqual(info["client"].apply_markdown, apply_markdown)
    self.assertEqual(info["client"].client_gravatar, client_gravatar)
    self.assertEqual(info["client"].user_profile_id, hamlet.id)
    self.assertEqual(info["flags"], ["mentioned"])
    self.assertEqual(info["is_sender"], False)
def test_restart(self) -> None:
    """Check that send_restart_events adds exactly one well-formed
    virtual restart event to a registered client's queue.
    """
    hamlet = self.example_user("hamlet")
    realm = hamlet.realm

    clear_client_event_queues_for_testing()
    queue_data = dict(
        all_public_streams=False,
        apply_markdown=True,
        client_gravatar=True,
        client_type_name="website",
        event_types=None,
        last_connection_time=time.time(),
        queue_timeout=0,
        realm_id=realm.id,
        user_profile_id=hamlet.id,
    )
    client = allocate_client_descriptor(queue_data)

    send_restart_events(immediate=True)

    # For now we only verify that a virtual event
    # gets added to the client's event_queue.  We
    # may decide to write a deeper test in the future
    # that exercises the finish_handler.
    virtual_events = client.event_queue.virtual_events
    self.assert_length(virtual_events, 1)
    restart_event = virtual_events["restart"]

    # Validate against the schema first, then pin the exact payload.
    check_restart_event("restart_event", restart_event)
    expected = dict(
        type="restart",
        zulip_version=ZULIP_VERSION,
        zulip_merge_base=ZULIP_MERGE_BASE,
        zulip_feature_level=API_FEATURE_LEVEL,
        server_generation=settings.SERVER_GENERATION,
        immediate=True,
        id=0,
    )
    self.assertEqual(restart_event, expected)
def test_restart(self) -> None:
    """Verify that send_restart_events adds exactly one virtual restart
    event with the expected payload to a registered client's queue.
    """
    hamlet = self.example_user("hamlet")
    realm = hamlet.realm

    clear_client_event_queues_for_testing()
    queue_data = dict(
        all_public_streams=False,
        apply_markdown=True,
        client_gravatar=True,
        client_type_name="website",
        event_types=None,
        last_connection_time=time.time(),
        queue_timeout=0,
        realm_id=realm.id,
        user_profile_id=hamlet.id,
    )
    client = allocate_client_descriptor(queue_data)

    send_restart_events(immediate=True)

    # For now we only verify that a virtual event
    # gets added to the client's event_queue.  We
    # may decide to write a deeper test in the future
    # that exercises the finish_handler.
    virtual_events = client.event_queue.virtual_events
    # Use assert_length for consistency with the rest of this suite
    # (it produces a better failure message than assertEqual(len(...))).
    self.assert_length(virtual_events, 1)
    restart_event = virtual_events["restart"]

    # TODO: add a schema checker for this to event_schema.py
    # and exercise it here (as well as the more concrete
    # check)
    self.assertEqual(
        restart_event,
        dict(
            type="restart",
            server_generation=settings.SERVER_GENERATION,
            immediate=True,
            id=0,
        ),
    )
def test_stream_watchers(self) -> None:
    '''
    We used to have a bug with stream_watchers, where we set their flags to
    None.
    '''
    cordelia = self.example_user('cordelia')
    hamlet = self.example_user('hamlet')
    realm = hamlet.realm
    stream_name = 'Denmark'

    self.unsubscribe(hamlet, stream_name)

    clear_client_event_queues_for_testing()

    queue_data = dict(
        all_public_streams=True,
        apply_markdown=True,
        client_gravatar=True,
        client_type_name='home grown api program',
        event_types=['message'],
        last_connection_time=time.time(),
        queue_timeout=0,
        realm_id=realm.id,
        user_profile_id=hamlet.id,
    )

    client = allocate_client_descriptor(queue_data)

    self.send_stream_message(cordelia.email, stream_name)

    # Use assert_length for consistency with the rest of this suite.
    self.assert_length(client.event_queue.contents(), 1)

    # This next line of code should silently succeed and basically do
    # nothing under the covers.  This test is here to prevent a bug
    # from re-appearing.
    missedmessage_hook(
        user_profile_id=hamlet.id,
        client=client,
        last_for_client=True,
    )
def test_stream_watchers(self) -> None:
    '''
    We used to have a bug with stream_watchers, where we set their flags to
    None.
    '''
    cordelia = self.example_user('cordelia')
    hamlet = self.example_user('hamlet')
    realm = hamlet.realm
    stream_name = 'Denmark'

    self.unsubscribe(hamlet, stream_name)

    clear_client_event_queues_for_testing()

    queue_data = dict(
        all_public_streams=True,
        apply_markdown=True,
        client_gravatar=True,
        client_type_name='home grown api program',
        event_types=['message'],
        last_connection_time=time.time(),
        queue_timeout=0,
        realm_id=realm.id,
        user_profile_id=hamlet.id,
    )

    client = allocate_client_descriptor(queue_data)

    self.send_stream_message(cordelia.email, stream_name)

    # Use assert_length for consistency with the rest of this suite.
    self.assert_length(client.event_queue.contents(), 1)

    # This next line of code should silently succeed and basically do
    # nothing under the covers.  This test is here to prevent a bug
    # from re-appearing.
    missedmessage_hook(
        user_profile_id=hamlet.id,
        client=client,
        last_for_client=True,
    )
def test_stream_watchers(self) -> None:
    """
    We used to have a bug with stream_watchers, where we set their flags to
    None.
    """
    cordelia = self.example_user("cordelia")
    hamlet = self.example_user("hamlet")
    realm = hamlet.realm
    stream_name = "Denmark"

    # Hamlet watches the stream via all_public_streams rather than a
    # subscription; cordelia is the sender and must be subscribed.
    self.subscribe(cordelia, stream_name)
    self.unsubscribe(hamlet, stream_name)

    queue_data = dict(
        all_public_streams=True,
        apply_markdown=True,
        client_gravatar=True,
        client_type_name="home grown API program",
        event_types=["message"],
        last_connection_time=time.time(),
        queue_timeout=0,
        realm_id=realm.id,
        user_profile_id=hamlet.id,
    )
    client = allocate_client_descriptor(queue_data)

    self.send_stream_message(cordelia, stream_name)
    self.assert_length(client.event_queue.contents(), 1)

    # This next line of code should silently succeed and basically do
    # nothing under the covers.  This test is here to prevent a bug
    # from re-appearing.
    missedmessage_hook(
        user_profile_id=hamlet.id,
        client=client,
        last_for_client=True,
    )
def test_restart_event_recursive_call_logic(self) -> None:
    # This is a test for a subtle corner case; see the comments
    # around RestartEventError for details.
    hamlet = self.example_user("hamlet")
    realm = hamlet.realm

    # Setup an empty event queue
    clear_client_event_queues_for_testing()
    queue_data = dict(
        all_public_streams=False,
        apply_markdown=True,
        client_gravatar=True,
        client_type_name="website",
        event_types=None,
        last_connection_time=time.time(),
        queue_timeout=0,
        realm_id=realm.id,
        user_profile_id=hamlet.id,
    )
    client = allocate_client_descriptor(queue_data)

    # Add a restart event to it.
    send_restart_events(immediate=True)

    # Make a second queue after the restart events were sent.
    second_client = allocate_client_descriptor(queue_data)

    # Fetch the restart event just sent above, without removing it
    # from the queue. We will use this as a mock return value in
    # get_user_events.
    # NOTE(review): despite the singular name, this is the full
    # "events" list from the GET /events response, not a single event.
    restart_event = orjson.loads(
        self.tornado_call(
            get_events_backend,
            hamlet,
            post_data={
                "queue_id": client.event_queue.id,
                "last_event_id": -1,
                "dont_block": "true",
                "user_profile_id": hamlet.id,
                "secret": settings.SHARED_SECRET,
                "client": "internal",
            },
            client_name="internal",
        ).content
    )["events"]

    # Now the tricky part: We call events_register_backend,
    # arranging it so that the first `get_user_events` call
    # returns our restart event (triggering the recursive
    # behavior), but the second (with a new queue) returns no
    # events.
    #
    # Because get_user_events always returns [] in tests, we need
    # to mock its return value as well; in an ideal world, we
    # would only need to mock client / second_client.
    #
    # The side_effect lists below are consumed in order: the first
    # register attempt sees client's queue (with the restart event),
    # the recursive retry sees second_client's fresh queue.
    with mock.patch(
        "zerver.lib.events.request_event_queue",
        side_effect=[client.event_queue.id, second_client.event_queue.id],
    ), mock.patch("zerver.lib.events.get_user_events", side_effect=[restart_event, []]):
        self.tornado_call(
            events_register_backend,
            hamlet,
            {
                "queue_id": client.event_queue.id,
                "user_client": "website",
                "last_event_id": -1,
                "dont_block": orjson.dumps(True).decode(),
            },
            client_name="website",
        )