Example 1
    def run_tests(self, test_labels, extra_tests=None,
                  full_suite=False, **kwargs):
        # type: (List[str], Optional[List[TestCase]], bool, **Any) -> bool
        self.setup_test_environment()
        try:
            suite = self.build_suite(test_labels, extra_tests)
        except AttributeError:
            traceback.print_exc()
            print()
            print("  This is often caused by a test module/class/function that doesn't exist or ")
            print("  import properly. You can usually debug in a `manage.py shell` via e.g. ")
            print("    import zerver.tests.test_messages")
            print("    from zerver.tests.test_messages import StreamMessagesTest")
            print("    StreamMessagesTest.test_message_to_stream")
            print()
            sys.exit(1)
        # We have to do the next line to avoid flaky scenarios where we
        # run a single test and getting an SA connection causes data from
        # a Django connection to be rolled back mid-test.
        get_sqlalchemy_connection()
        result = self.run_suite(suite)
        self.teardown_test_environment()
        failed = self.suite_result(suite, result)
        if not failed:
            write_instrumentation_reports(full_suite=full_suite)
        return failed
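
The run_tests variants in these examples all implement the hook that Django's test framework calls on a custom test runner. As a minimal sketch of how such a runner is driven, following the pattern from Django's own documentation (the settings module name is an assumption for illustration, and the bool return matches the Example 1 variant):

# Sketch: driving a custom Django test runner, per the documented Django pattern.
# The settings module named here is an assumption for illustration.
import os
import sys

import django
from django.conf import settings
from django.test.utils import get_runner

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zproject.test_settings")
django.setup()

TestRunner = get_runner(settings)  # resolves the class named in settings.TEST_RUNNER
runner = TestRunner()
failed = runner.run_tests(["zerver.tests.test_messages"])
sys.exit(bool(failed))  # nonzero exit status when the suite failed
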
Example 2
    def run_tests(self,
                  test_labels,
                  extra_tests=None,
                  full_suite=False,
                  **kwargs):
        # type: (List[str], Optional[List[TestCase]], bool, **Any) -> bool
        self.setup_test_environment()
        try:
            suite = self.build_suite(test_labels, extra_tests)
        except AttributeError:
            traceback.print_exc()
            print()
            print(
                "  This is often caused by a test module/class/function that doesn't exist or "
            )
            print(
                "  import properly. You can usually debug in a `manage.py shell` via e.g. "
            )
            print("    import zerver.tests.test_messages")
            print(
                "    from zerver.tests.test_messages import StreamMessagesTest"
            )
            print("    StreamMessagesTest.test_message_to_stream")
            print()
            sys.exit(1)
        # We have to do the next line to avoid flaky scenarios where we
        # run a single test and getting an SA connection causes data from
        # a Django connection to be rolled back mid-test.
        get_sqlalchemy_connection()
        result = self.run_suite(suite)
        self.teardown_test_environment()
        failed = self.suite_result(suite, result)
        if not failed:
            write_instrumentation_reports(full_suite=full_suite)
        return failed
Example 3
    def run_tests(
        self,
        test_labels: List[str],
        extra_tests: Optional[List[TestCase]] = None,
        full_suite: bool = False,
        include_webhooks: bool = False,
        **kwargs: Any,
    ) -> Tuple[bool, List[str]]:
        self.setup_test_environment()
        try:
            suite = self.build_suite(test_labels, extra_tests)
        except AttributeError:
            # We are likely to get here only when running tests in serial
            # mode on Python 3.4 or lower.
            # test_labels are always normalized to include the correct prefix.
            # If we run the command with ./tools/test-backend test_alert_words,
            # test_labels will be equal to ['zerver.tests.test_alert_words'].
            for test_label in test_labels:
                check_import_error(test_label)

            # We don't expect to reach this line under normal circumstances,
            # but if some unforeseen scenario produces an AttributeError that
            # was not caused by an import error, re-raise the exception for
            # debugging purposes.
            raise

        self.test_imports(test_labels, suite)
        if self.parallel == 1:
            # We are running in serial mode so create the databases here.
            # For parallel mode, the databases are created in init_worker.
            # We don't want to create and destroy DB in setup_test_environment
            # because it will be called for both serial and parallel modes.
            # However, at this point we know in which mode we would be running
            # since that decision has already been made in build_suite().
            #
            # We pass a _worker_id, which in this code path is always 0.
            destroy_test_databases(_worker_id)
            create_test_databases(_worker_id)

        # We have to do the next line to avoid flaky scenarios where we
        # run a single test and getting an SA connection causes data from
        # a Django connection to be rolled back mid-test.
        get_sqlalchemy_connection()
        result = self.run_suite(suite)
        self.teardown_test_environment()
        failed = self.suite_result(suite, result)
        if not failed:
            write_instrumentation_reports(full_suite=full_suite,
                                          include_webhooks=include_webhooks)
        return failed, result.failed_tests
Example 4
def messages_in_narrow_backend(request, user_profile,
                               msg_ids = REQ(validator=check_list(check_int)),
                               narrow = REQ(converter=narrow_parameter)):
    # type: (HttpRequest, UserProfile, List[int], List[Dict[str, Any]]) -> HttpResponse

    # Note that this function will only work on messages the user
    # actually received

    # TODO: We assume that the narrow is a search.  For now this works because
    # the browser only ever calls this function for searches, since it can't
    # apply that narrow operator itself.

    query = select([column("message_id"), column("subject"), column("rendered_content")],
                   and_(column("user_profile_id") == literal(user_profile.id),
                        column("message_id").in_(msg_ids)),
                   join(table("zerver_usermessage"), table("zerver_message"),
                        literal_column("zerver_usermessage.message_id") ==
                        literal_column("zerver_message.id")))

    builder = NarrowBuilder(user_profile, column("message_id"))
    for term in narrow:
        query = builder.add_term(query, term)

    sa_conn = get_sqlalchemy_connection()
    query_result = list(sa_conn.execute(query).fetchall())

    search_fields = dict()
    for row in query_result:
        (message_id, subject, rendered_content, content_matches, subject_matches) = row
        search_fields[message_id] = get_search_fields(rendered_content, subject,
                                                      content_matches, subject_matches)

    return json_success({"messages": search_fields})
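
Example 4 builds its query with SQLAlchemy's legacy positional form, select(columns, whereclause, from_obj). As a sketch, here is the same join in the modern generative style (SQLAlchemy 1.4+; the user id and message ids are illustrative), using only the lightweight table()/column() constructs so the statement compiles to SQL without any database:

# Sketch: the Example 4 join rewritten in modern SQLAlchemy style.
from sqlalchemy import and_, column, literal, select, table

zerver_usermessage = table(
    "zerver_usermessage", column("message_id"), column("user_profile_id"))
zerver_message = table(
    "zerver_message", column("id"), column("subject"), column("rendered_content"))

query = (
    select(zerver_usermessage.c.message_id,
           zerver_message.c.subject,
           zerver_message.c.rendered_content)
    .select_from(zerver_usermessage.join(
        zerver_message,
        zerver_usermessage.c.message_id == zerver_message.c.id))
    .where(and_(
        zerver_usermessage.c.user_profile_id == literal(42),  # illustrative id
        zerver_usermessage.c.message_id.in_([1, 2, 3])))      # illustrative ids
)

print(query)  # renders the SELECT ... JOIN ... WHERE ... SQL; no connection needed
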
Example 5
    def run_tests(self,
                  test_labels,
                  extra_tests=None,
                  full_suite=False,
                  **kwargs):
        # type: (List[str], Optional[List[TestCase]], bool, **Any) -> Tuple[bool, List[str]]
        self.setup_test_environment()
        try:
            suite = self.build_suite(test_labels, extra_tests)
        except AttributeError:
            traceback.print_exc()
            print()
            print(
                "  This is often caused by a test module/class/function that doesn't exist or "
            )
            print(
                "  import properly. You can usually debug in a `manage.py shell` via e.g. "
            )
            print("    import zerver.tests.test_messages")
            print(
                "    from zerver.tests.test_messages import StreamMessagesTest"
            )
            print("    StreamMessagesTest.test_message_to_stream")
            print()
            sys.exit(1)

        if self.parallel == 1:
            # We are running in serial mode so create the databases here.
            # For parallel mode, the databases are created in init_worker.
            # We don't want to create and destroy DB in setup_test_environment
            # because it will be called for both serial and parallel modes.
            # However, at this point we know in which mode we would be running
            # since that decision has already been made in build_suite().
            destroy_test_databases(self.database_id)
            create_test_databases(self.database_id)

        # We have to do the next line to avoid flaky scenarios where we
        # run a single test and getting an SA connection causes data from
        # a Django connection to be rolled back mid-test.
        get_sqlalchemy_connection()
        result = self.run_suite(suite)
        self.teardown_test_environment()
        failed = self.suite_result(suite, result)
        if not failed:
            write_instrumentation_reports(full_suite=full_suite)
        return failed, result.failed_tests
Example 6
    def run_tests(self, test_labels: List[str],
                  extra_tests: Optional[List[TestCase]]=None,
                  full_suite: bool=False,
                  include_webhooks: bool=False,
                  **kwargs: Any) -> Tuple[bool, List[str]]:
        self.setup_test_environment()
        try:
            suite = self.build_suite(test_labels, extra_tests)
        except AttributeError:
            # We are likely to get here only when running tests in serial
            # mode on Python 3.4 or lower.
            # test_labels are always normalized to include the correct prefix.
            # If we run the command with ./tools/test-backend test_alert_words,
            # test_labels will be equal to ['zerver.tests.test_alert_words'].
            for test_label in test_labels:
                check_import_error(test_label)

            # We don't expect to reach this line under normal circumstances,
            # but if some unforeseen scenario produces an AttributeError that
            # was not caused by an import error, re-raise the exception for
            # debugging purposes.
            raise

        self.test_imports(test_labels, suite)
        if self.parallel == 1:
            # We are running in serial mode so create the databases here.
            # For parallel mode, the databases are created in init_worker.
            # We don't want to create and destroy DB in setup_test_environment
            # because it will be called for both serial and parallel modes.
            # However, at this point we know in which mode we would be running
            # since that decision has already been made in build_suite().
            destroy_test_databases(self.database_id)
            create_test_databases(self.database_id)

        # We have to do the next line to avoid flaky scenarios where we
        # run a single test and getting an SA connection causes data from
        # a Django connection to be rolled back mid-test.
        get_sqlalchemy_connection()
        result = self.run_suite(suite)
        self.teardown_test_environment()
        failed = self.suite_result(suite, result)
        if not failed:
            write_instrumentation_reports(full_suite=full_suite, include_webhooks=include_webhooks)
        return failed, result.failed_tests
Example 7
def messages_in_narrow_backend(
    request: HttpRequest,
    user_profile: UserProfile,
    msg_ids: List[int] = REQ(validator=check_list(check_int)),
    narrow: OptionalNarrowListT = REQ(converter=narrow_parameter),
) -> HttpResponse:

    first_visible_message_id = get_first_visible_message_id(user_profile.realm)
    msg_ids = [
        message_id for message_id in msg_ids
        if message_id >= first_visible_message_id
    ]
    # This query is limited to messages the user has access to because they
    # actually received them, as reflected in `zerver_usermessage`.
    query = select(
        [column("message_id"),
         topic_column_sa(),
         column("rendered_content")],
        and_(
            column("user_profile_id") == literal(user_profile.id),
            column("message_id").in_(msg_ids)),
        join(
            table("zerver_usermessage"), table("zerver_message"),
            literal_column("zerver_usermessage.message_id") == literal_column(
                "zerver_message.id")))

    builder = NarrowBuilder(user_profile, column("message_id"))
    if narrow is not None:
        for term in narrow:
            query = builder.add_term(query, term)

    sa_conn = get_sqlalchemy_connection()
    query_result = list(sa_conn.execute(query).fetchall())

    search_fields = dict()
    for row in query_result:
        message_id = row['message_id']
        topic_name = row[DB_TOPIC_NAME]
        rendered_content = row['rendered_content']
        if 'content_matches' in row:
            content_matches = row['content_matches']
            topic_matches = row['topic_matches']
        else:
            content_matches = topic_matches = []
        search_fields[message_id] = get_search_fields(rendered_content,
                                                      topic_name,
                                                      content_matches,
                                                      topic_matches)

    return json_success({"messages": search_fields})
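
A detail worth noting between the two messages_in_narrow_backend versions: Example 4 unpacks each result row positionally, while Example 7 reads columns by key. A self-contained sketch of both access styles against a throwaway in-memory database (SQLAlchemy 1.4+; the column names are illustrative):

# Sketch: positional vs. keyed access to a SQLAlchemy result row.
from sqlalchemy import create_engine, text

engine = create_engine("sqlite://")  # throwaway in-memory SQLite database
with engine.connect() as conn:
    row = conn.execute(
        text("SELECT 1 AS message_id, 'rendered' AS rendered_content")).one()
    message_id, rendered_content = row  # positional unpacking, as in Example 4
    assert row.message_id == 1  # attribute access
    assert row._mapping["rendered_content"] == "rendered"  # keyed access, 1.4+ spelling
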
Example 8
    def run_tests(self, test_labels, extra_tests=None,
                  full_suite=False, **kwargs):
        # type: (List[str], Optional[List[TestCase]], bool, **Any) -> Tuple[bool, List[str]]
        self.setup_test_environment()
        try:
            suite = self.build_suite(test_labels, extra_tests)
        except AttributeError:
            traceback.print_exc()
            print()
            print("  This is often caused by a test module/class/function that doesn't exist or ")
            print("  import properly. You can usually debug in a `manage.py shell` via e.g. ")
            print("    import zerver.tests.test_messages")
            print("    from zerver.tests.test_messages import StreamMessagesTest")
            print("    StreamMessagesTest.test_message_to_stream")
            print()
            sys.exit(1)

        if self.parallel == 1:
            # We are running in serial mode so create the databases here.
            # For parallel mode, the databases are created in init_worker.
            # We don't want to create and destroy DB in setup_test_environment
            # because it will be called for both serial and parallel modes.
            # However, at this point we know in which mode we would be running
            # since that decision has already been made in build_suite().
            destroy_test_databases(self.database_id)
            create_test_databases(self.database_id)

        # We have to do the next line to avoid flaky scenarios where we
        # run a single test and getting an SA connection causes data from
        # a Django connection to be rolled back mid-test.
        get_sqlalchemy_connection()
        result = self.run_suite(suite)
        self.teardown_test_environment()
        failed = self.suite_result(suite, result)
        if not failed:
            write_instrumentation_reports(full_suite=full_suite)
        return failed, result.failed_tests
Example 9
def get_messages_backend(request: HttpRequest, user_profile: UserProfile,
                         anchor_val: Optional[str]=REQ(
                             'anchor', str_validator=check_string, default=None),
                         num_before: int=REQ(converter=to_non_negative_int),
                         num_after: int=REQ(converter=to_non_negative_int),
                         narrow: OptionalNarrowListT=REQ('narrow', converter=narrow_parameter, default=None),
                         use_first_unread_anchor_val: bool=REQ('use_first_unread_anchor',
                                                               validator=check_bool, default=False),
                         client_gravatar: bool=REQ(validator=check_bool, default=False),
                         apply_markdown: bool=REQ(validator=check_bool, default=True)) -> HttpResponse:
    anchor = parse_anchor_value(anchor_val, use_first_unread_anchor_val)
    if num_before + num_after > MAX_MESSAGES_PER_FETCH:
        return json_error(_("Too many messages requested (maximum {}).").format(
            MAX_MESSAGES_PER_FETCH,
        ))

    if user_profile.realm.email_address_visibility != Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE:
        # If email addresses are only available to administrators,
        # clients cannot compute gravatars, so we force-set it to false.
        client_gravatar = False

    include_history = ok_to_include_history(narrow, user_profile)
    if include_history:
        # The initial query in this case doesn't use `zerver_usermessage`,
        # and isn't yet limited to messages the user is entitled to see!
        #
        # This is OK only because we've made sure this is a narrow that
        # will cause us to limit the query appropriately later.
        # See `ok_to_include_history` for details.
        need_message = True
        need_user_message = False
    elif narrow is None:
        # We need to limit to messages the user has received, but we don't actually
        # need any fields from Message
        need_message = False
        need_user_message = True
    else:
        need_message = True
        need_user_message = True

    query, inner_msg_id_col = get_base_query_for_search(
        user_profile=user_profile,
        need_message=need_message,
        need_user_message=need_user_message,
    )

    query, is_search = add_narrow_conditions(
        user_profile=user_profile,
        inner_msg_id_col=inner_msg_id_col,
        query=query,
        narrow=narrow,
    )

    if narrow is not None:
        # Add some metadata to our logging data for narrows
        verbose_operators = []
        for term in narrow:
            if term['operator'] == "is":
                verbose_operators.append("is:" + term['operand'])
            else:
                verbose_operators.append(term['operator'])
        request._log_data['extra'] = "[{}]".format(",".join(verbose_operators))

    sa_conn = get_sqlalchemy_connection()

    if anchor is None:
        # The use_first_unread_anchor code path
        anchor = find_first_unread_anchor(
            sa_conn,
            user_profile,
            narrow,
        )

    anchored_to_left = (anchor == 0)

    # Set value that will be used to short circuit the after_query
    # altogether and avoid needless conditions in the before_query.
    anchored_to_right = (anchor >= LARGER_THAN_MAX_MESSAGE_ID)
    if anchored_to_right:
        num_after = 0

    first_visible_message_id = get_first_visible_message_id(user_profile.realm)
    query = limit_query_to_range(
        query=query,
        num_before=num_before,
        num_after=num_after,
        anchor=anchor,
        anchored_to_left=anchored_to_left,
        anchored_to_right=anchored_to_right,
        id_col=inner_msg_id_col,
        first_visible_message_id=first_visible_message_id,
    )

    main_query = alias(query)
    query = select(main_query.c, None, main_query).order_by(column("message_id").asc())
    # This is a hack to tag the query we use for testing
    query = query.prefix_with("/* get_messages */")
    rows = list(sa_conn.execute(query).fetchall())

    query_info = post_process_limited_query(
        rows=rows,
        num_before=num_before,
        num_after=num_after,
        anchor=anchor,
        anchored_to_left=anchored_to_left,
        anchored_to_right=anchored_to_right,
        first_visible_message_id=first_visible_message_id,
    )

    rows = query_info['rows']

    # The following is a little messy, but ensures that the code paths
    # are similar regardless of the value of include_history.  The
    # 'user_messages' dictionary maps each message to the user's
    # UserMessage object for that message, which we will attach to the
    # rendered message dict before returning it.  We attempt to
    # bulk-fetch rendered message dicts from remote cache using the
    # 'messages' list.
    message_ids: List[int] = []
    user_message_flags: Dict[int, List[str]] = {}
    if include_history:
        message_ids = [row[0] for row in rows]

        # TODO: This could be done with an outer join instead of two queries
        um_rows = UserMessage.objects.filter(user_profile=user_profile,
                                             message__id__in=message_ids)
        user_message_flags = {um.message_id: um.flags_list() for um in um_rows}

        for message_id in message_ids:
            if message_id not in user_message_flags:
                user_message_flags[message_id] = ["read", "historical"]
    else:
        for row in rows:
            message_id = row[0]
            flags = row[1]
            user_message_flags[message_id] = UserMessage.flags_list_for_flags(flags)
            message_ids.append(message_id)

    search_fields: Dict[int, Dict[str, str]] = dict()
    if is_search:
        for row in rows:
            message_id = row[0]
            (topic_name, rendered_content, content_matches, topic_matches) = row[-4:]

            try:
                search_fields[message_id] = get_search_fields(rendered_content, topic_name,
                                                              content_matches, topic_matches)
            except UnicodeDecodeError as err:  # nocoverage
                # No coverage for this block since it should be
                # impossible, and we plan to remove it once we've
                # debugged the case that makes it happen.
                raise Exception(str(err), message_id, narrow)

    message_list = messages_for_ids(
        message_ids=message_ids,
        user_message_flags=user_message_flags,
        search_fields=search_fields,
        apply_markdown=apply_markdown,
        client_gravatar=client_gravatar,
        allow_edit_history=user_profile.realm.allow_edit_history,
    )

    statsd.incr('loaded_old_messages', len(message_list))

    ret = dict(
        messages=message_list,
        result='success',
        msg='',
        found_anchor=query_info['found_anchor'],
        found_oldest=query_info['found_oldest'],
        found_newest=query_info['found_newest'],
        history_limited=query_info['history_limited'],
        anchor=anchor,
    )
    return json_success(ret)
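
Example 9 delegates the anchor windowing to limit_query_to_range. Judging from the inline version of the same logic in Example 10 below, the observable contract is: fetch up to num_before rows at or below the anchor and up to num_after rows at or above it, without counting the anchor twice. A plain-list mirror of that contract (a sketch of the behavior, not Zulip's implementation):

# Sketch: the before/after windowing contract over plain lists of ids.
from typing import List

def window(ids: List[int], anchor: int, num_before: int, num_after: int) -> List[int]:
    ids = sorted(ids)
    # When rows after the anchor are requested too, exclude the anchor from
    # the "before" half so it is not returned twice (cf. before_anchor in
    # Example 10).
    before_anchor = anchor - 1 if num_after else anchor
    before = [i for i in reversed(ids) if i <= before_anchor][:num_before]
    after = [i for i in ids if i >= anchor][:num_after]
    return sorted(before) + after

# Two rows either side of anchor 4; the anchor appears exactly once.
assert window([1, 2, 3, 4, 5, 6], anchor=4, num_before=2, num_after=2) == [2, 3, 4, 5]
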
Example 10
def get_old_messages_backend(request, user_profile,
                             anchor = REQ(converter=int),
                             num_before = REQ(converter=to_non_negative_int),
                             num_after = REQ(converter=to_non_negative_int),
                             narrow = REQ('narrow', converter=narrow_parameter, default=None),
                             use_first_unread_anchor = REQ(default=False, converter=ujson.loads),
                             apply_markdown=REQ(default=True,
                                                converter=ujson.loads)):
    # type: (HttpRequest, UserProfile, int, int, int, Optional[List[Dict[str, Any]]], bool, bool) -> HttpResponse
    include_history = ok_to_include_history(narrow, user_profile.realm)

    if include_history and not use_first_unread_anchor:
        query = select([column("id").label("message_id")], None, table("zerver_message"))
        inner_msg_id_col = literal_column("zerver_message.id")
    elif narrow is None:
        query = select([column("message_id"), column("flags")],
                       column("user_profile_id") == literal(user_profile.id),
                       table("zerver_usermessage"))
        inner_msg_id_col = column("message_id")
    else:
        # TODO: Don't do this join if we're not doing a search
        query = select([column("message_id"), column("flags")],
                       column("user_profile_id") == literal(user_profile.id),
                       join(table("zerver_usermessage"), table("zerver_message"),
                            literal_column("zerver_usermessage.message_id") ==
                            literal_column("zerver_message.id")))
        inner_msg_id_col = column("message_id")

    num_extra_messages = 1
    is_search = False

    if narrow is not None:
        # Add some metadata to our logging data for narrows
        verbose_operators = []
        for term in narrow:
            if term['operator'] == "is":
                verbose_operators.append("is:" + term['operand'])
            else:
                verbose_operators.append(term['operator'])
        request._log_data['extra'] = "[%s]" % (",".join(verbose_operators),)

        # Build the query for the narrow
        num_extra_messages = 0
        builder = NarrowBuilder(user_profile, inner_msg_id_col)
        search_term = None # type: Optional[Dict[str, Any]]
        for term in narrow:
            if term['operator'] == 'search':
                if not is_search:
                    search_term = term
                    query = query.column(column("subject")).column(column("rendered_content"))
                    is_search = True
                else:
                    # Join the search operators if there are multiple of them
                    search_term['operand'] += ' ' + term['operand']
            else:
                query = builder.add_term(query, term)
        if is_search:
            query = builder.add_term(query, search_term)

    # We add 1 to the number of messages requested if no narrow was
    # specified to ensure that the resulting list always contains the
    # anchor message.  If a narrow was specified, the anchor message
    # might not match the narrow anyway.
    if num_after != 0:
        num_after += num_extra_messages
    else:
        num_before += num_extra_messages

    sa_conn = get_sqlalchemy_connection()
    if use_first_unread_anchor:
        condition = column("flags").op("&")(UserMessage.flags.read.mask) == 0

        # We exclude messages on muted topics when finding the first unread
        # message in this narrow
        muting_conditions = exclude_muting_conditions(user_profile, narrow)
        if muting_conditions:
            condition = and_(condition, *muting_conditions)

        first_unread_query = query.where(condition)
        first_unread_query = first_unread_query.order_by(inner_msg_id_col.asc()).limit(1)
        first_unread_result = list(sa_conn.execute(first_unread_query).fetchall())
        if len(first_unread_result) > 0:
            anchor = first_unread_result[0][0]
        else:
            anchor = LARGER_THAN_MAX_MESSAGE_ID

    before_query = None
    after_query = None
    if num_before != 0:
        before_anchor = anchor
        if num_after != 0:
            # Don't include the anchor in both the before query and the after query
            before_anchor = anchor - 1
        before_query = query.where(inner_msg_id_col <= before_anchor) \
                            .order_by(inner_msg_id_col.desc()).limit(num_before)
    if num_after != 0:
        after_query = query.where(inner_msg_id_col >= anchor) \
                           .order_by(inner_msg_id_col.asc()).limit(num_after)

    if anchor == LARGER_THAN_MAX_MESSAGE_ID:
        # There's no need for an after_query if we're targeting just the target message.
        after_query = None

    if before_query is not None:
        if after_query is not None:
            query = union_all(before_query.self_group(), after_query.self_group())
        else:
            query = before_query
    elif after_query is not None:
        query = after_query
    else:
        # This can happen when a narrow is specified.
        query = query.where(inner_msg_id_col == anchor)

    main_query = alias(query)
    query = select(main_query.c, None, main_query).order_by(column("message_id").asc())
    # This is a hack to tag the query we use for testing
    query = query.prefix_with("/* get_old_messages */")
    query_result = list(sa_conn.execute(query).fetchall())

    # The following is a little messy, but ensures that the code paths
    # are similar regardless of the value of include_history.  The
    # 'user_messages' dictionary maps each message to the user's
    # UserMessage object for that message, which we will attach to the
    # rendered message dict before returning it.  We attempt to
    # bulk-fetch rendered message dicts from remote cache using the
    # 'messages' list.
    search_fields = dict() # type: Dict[int, Dict[str, Text]]
    message_ids = [] # type: List[int]
    user_message_flags = {} # type: Dict[int, List[str]]
    if include_history:
        message_ids = [row[0] for row in query_result]

        # TODO: This could be done with an outer join instead of two queries
        user_message_flags = dict((user_message.message_id, user_message.flags_list()) for user_message in
                                  UserMessage.objects.filter(user_profile=user_profile,
                                                             message__id__in=message_ids))
        for row in query_result:
            message_id = row[0]
            if user_message_flags.get(message_id) is None:
                user_message_flags[message_id] = ["read", "historical"]
            if is_search:
                (_, subject, rendered_content, content_matches, subject_matches) = row
                search_fields[message_id] = get_search_fields(rendered_content, subject,
                                                              content_matches, subject_matches)
    else:
        for row in query_result:
            message_id = row[0]
            flags = row[1]
            user_message_flags[message_id] = parse_usermessage_flags(flags)

            message_ids.append(message_id)

            if is_search:
                (_, _, subject, rendered_content, content_matches, subject_matches) = row
                search_fields[message_id] = get_search_fields(rendered_content, subject,
                                                              content_matches, subject_matches)

    cache_transformer = lambda row: MessageDict.build_dict_from_raw_db_row(row, apply_markdown)
    id_fetcher = lambda row: row['id']

    message_dicts = generic_bulk_cached_fetch(lambda message_id: to_dict_cache_key_id(message_id, apply_markdown),
                                              Message.get_raw_db_rows,
                                              message_ids,
                                              id_fetcher=id_fetcher,
                                              cache_transformer=cache_transformer,
                                              extractor=extract_message_dict,
                                              setter=stringify_message_dict)

    message_list = []
    for message_id in message_ids:
        msg_dict = message_dicts[message_id]
        msg_dict.update({"flags": user_message_flags[message_id]})
        msg_dict.update(search_fields.get(message_id, {}))
        message_list.append(msg_dict)

    statsd.incr('loaded_old_messages', len(message_list))
    ret = {'messages': message_list,
           "result": "success",
           "msg": ""}
    return json_success(ret)
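
The condition column("flags").op("&")(UserMessage.flags.read.mask) == 0 in Example 10's first-unread lookup is just a bitmask test: a message counts as unread while the "read" bit of its UserMessage flags is clear. In plain Python (the mask value here is an assumption for illustration; Zulip derives it from the flags BitField):

# Sketch: the unread test from first_unread_query as plain bit arithmetic.
READ_MASK = 1  # assumed value of UserMessage.flags.read.mask

def is_unread(flags: int) -> bool:
    return flags & READ_MASK == 0

assert is_unread(0)              # no flags set: unread
assert not is_unread(READ_MASK)  # read bit set: read
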