def test_bulk_message_fetching(self) -> None: sender = self.example_user('othello') receiver = self.example_user('hamlet') pm_recipient = Recipient.objects.get(type_id=receiver.id, type=Recipient.PERSONAL) stream_name = 'Çiğdem' stream = self.make_stream(stream_name) stream_recipient = Recipient.objects.get(type_id=stream.id, type=Recipient.STREAM) sending_client = make_client(name="test suite") ids = [] for i in range(300): for recipient in [pm_recipient, stream_recipient]: message = Message( sender=sender, recipient=recipient, content=f'whatever {i}', rendered_content='DOES NOT MATTER', rendered_content_version=markdown_version, date_sent=timezone_now(), sending_client=sending_client, last_edit_time=timezone_now(), edit_history='[]', ) message.set_topic_name('whatever') message.save() ids.append(message.id) Reaction.objects.create(user_profile=sender, message=message, emoji_name='simple_smile') num_ids = len(ids) self.assertTrue(num_ids >= 600) flush_per_request_caches() t = time.time() with queries_captured() as queries: rows = list(MessageDict.get_raw_db_rows(ids)) objs = [ MessageDict.build_dict_from_raw_db_row(row) for row in rows ] MessageDict.post_process_dicts(objs, apply_markdown=False, client_gravatar=False) delay = time.time() - t # Make sure we don't take longer than 1.5ms per message to # extract messages. Note that we increased this from 1ms to # 1.5ms to handle tests running in parallel being a bit # slower. error_msg = f"Number of ids: {num_ids}. Time delay: {delay}" self.assertTrue(delay < 0.0015 * num_ids, error_msg) self.assert_length(queries, 7) self.assertEqual(len(rows), num_ids)
def get_fetch_payload(msg_id: int, apply_markdown: bool, client_gravatar: bool) -> Dict[str, Any]: msg = reload_message(msg_id) unhydrated_dict = MessageDict.to_dict_uncached_helper([msg])[0] # The next step mutates the dict in place # for performance reasons. MessageDict.post_process_dicts( [unhydrated_dict], apply_markdown=apply_markdown, client_gravatar=client_gravatar, ) final_dict = unhydrated_dict return final_dict
def test_bulk_message_fetching(self) -> None: sender = self.example_user("othello") receiver = self.example_user("hamlet") pm_recipient = Recipient.objects.get(type_id=receiver.id, type=Recipient.PERSONAL) stream_name = "Çiğdem" stream = self.make_stream(stream_name) stream_recipient = Recipient.objects.get(type_id=stream.id, type=Recipient.STREAM) sending_client = make_client(name="test suite") ids = [] for i in range(300): for recipient in [pm_recipient, stream_recipient]: message = Message( sender=sender, recipient=recipient, content=f"whatever {i}", rendered_content="DOES NOT MATTER", rendered_content_version=markdown_version, date_sent=timezone_now(), sending_client=sending_client, last_edit_time=timezone_now(), edit_history="[]", ) message.set_topic_name("whatever") message.save() ids.append(message.id) Reaction.objects.create(user_profile=sender, message=message, emoji_name="simple_smile") num_ids = len(ids) self.assertTrue(num_ids >= 600) flush_per_request_caches() with queries_captured() as queries: rows = list(MessageDict.get_raw_db_rows(ids)) objs = [ MessageDict.build_dict_from_raw_db_row(row) for row in rows ] MessageDict.post_process_dicts(objs, apply_markdown=False, client_gravatar=False) self.assert_length(queries, 7) self.assert_length(rows, num_ids)