Example #1
async def test_get_users_by_id(writer: TwitterDataWriter, reader: TwitterDataReader):
    conversation_id = "users by id test"
    obama_message = generate_messages(
        1, random_datestring(), "", conversation_id, OBAMA
    )[0]
    dog_rates_message = generate_messages(
        1, random_datestring(), "", conversation_id, DOG_RATES
    )[0]
    amazing_phil_message = generate_messages(
        1, random_datestring(), "", conversation_id, AMAZINGPHIL
    )[0]

    for message in (obama_message, dog_rates_message, amazing_phil_message):
        writer.add_message(message, True)
    await writer.finalize()

    assert reader.get_users_by_id((OBAMA, AMAZINGPHIL)) == [
        ArchivedUserSummary(
            str(OBAMA),
            "",
            str(OBAMA),
            DBRead.DEFAULT_DISPLAY_NAME,
            DBRead.USER_AVATAR_DEFAULT_URL,
            False,
            False,
        ),
        ArchivedUserSummary(
            str(AMAZINGPHIL),
            "",
            str(AMAZINGPHIL),
            DBRead.DEFAULT_DISPLAY_NAME,
            DBRead.USER_AVATAR_DEFAULT_URL,
            False,
            False,
        ),
    ]

    assert reader.get_users_by_id((DOG_RATES,), sidecar=False) == [
        ArchivedUser(
            str(DOG_RATES),
            "",
            "dog_rates",
            "We Rate Dogs",
            f"{DBRead.AVATAR_API_URL}{DOG_RATES}.jpg",
            True,
            False,
            1,
            "sample bio",
            "",
            dog_rates_message["createdAt"],
            dog_rates_message["createdAt"],
        )
    ]
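Every example builds its input with a generate_messages helper that isn't shown here. Judging from the call sites (a message count, a start timestamp, an optional end timestamp, a conversation id, a sender, and an optional recipient) and from the keys the tests read back ("createdAt", "conversationId", "text"), it plausibly behaves like the sketch below; the extra field names and the evenly spaced timestamps are assumptions, and OBAMA, DOG_RATES, AMAZINGPHIL, and MAIN_USER_ID appear to be numeric Twitter account ids.

from datetime import datetime, timedelta
from uuid import uuid4

TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"  # matches e.g. "2010-06-06T01:00:00.543Z"

def generate_messages(count, start_time, end_time, conversation_id, sender, recipient=None):
    """Hypothetical stand-in for the project's helper: returns `count` message
    dicts spaced evenly between start_time and end_time (or all at start_time
    when end_time is empty)."""
    start = datetime.strptime(start_time, TIMESTAMP_FORMAT)
    if end_time:
        step = (datetime.strptime(end_time, TIMESTAMP_FORMAT) - start) / max(count - 1, 1)
    else:
        step = timedelta(0)
    return [
        {
            "id": uuid4().hex,  # assumed field
            "conversationId": conversation_id,
            "createdAt": (start + step * i).strftime(TIMESTAMP_FORMAT)[:-4] + "Z",
            "senderId": str(sender),  # assumed field
            "recipientId": str(recipient) if recipient is not None else None,  # assumed field
            "text": f"message {i}",
        }
        for i in range(count)
    ]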
Example #2
async def test_conversations_by_user(
    writer: TwitterDataWriter, reader: TwitterDataReader
):
    """create a conversation or two with random users and then like 30 with a
    particular user. make sure they come back in the right order. this test also
    doesn't bother testing pagination"""
    dummy_convo = generate_messages(
        2,
        random_2000s_datestring(),
        random_2010s_datestring(),
        "won't be returned",
        OBAMA,
        AMAZINGPHIL,
    )

    messages_with_dog_rates = []
    for how_many in range(30, 0, -1):
        messages_with_dog_rates += generate_messages(
            how_many,
            random_2000s_datestring(),
            random_2010s_datestring(),
            get_random_text(),
            DOG_RATES,
        )

    descending_ids = []
    prev_id = ""
    for message in messages_with_dog_rates:
        if message["conversationId"] != prev_id:
            descending_ids.append(message["conversationId"])
            prev_id = message["conversationId"]

    shuffle(messages_with_dog_rates)

    for message in dummy_convo:
        writer.add_message(message)
    for message in messages_with_dog_rates:
        writer.add_message(message, True)
    await writer.finalize()

    assert [
        c.id for c in reader.get_conversations_by_user(DOG_RATES, 1)
    ] == descending_ids[: DBRead.CONVERSATIONS_PER_PAGE]
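The timestamp and text helpers used above and in the remaining examples (random_datestring, random_2000s_datestring, random_2010s_datestring, get_random_text) are also not shown. Going only by their names and by the timestamp format that appears literally in other tests, they might look roughly like this sketch; the exact year ranges and string length are guesses.

from datetime import datetime, timedelta
from random import choices, randrange
from string import ascii_lowercase

def _random_datestring(start_year, end_year):
    """Random timestamp in the archive's format, e.g. 2010-06-06T01:00:00.543Z."""
    start = datetime(start_year, 1, 1)
    span = int((datetime(end_year + 1, 1, 1) - start).total_seconds())
    moment = start + timedelta(seconds=randrange(span), milliseconds=randrange(1000))
    return moment.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + "Z"

def random_datestring():
    return _random_datestring(2000, 2019)

def random_2000s_datestring():
    return _random_datestring(2000, 2009)

def random_2010s_datestring():
    return _random_datestring(2010, 2019)

def get_random_text(length=16):
    """Random lowercase string; the tests use it for message text and conversation ids."""
    return "".join(choices(ascii_lowercase, k=length))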
Example #3
async def test_onesided_individual_conversation(writer: TwitterDataWriter):
    conversation_id = "one-sided conversation"
    talking_user = DOG_RATES
    start_time = "2010-06-06T01:00:00.543Z"
    end_time = "2010-09-06T01:00:00.345Z"
    messages = generate_messages(
        5, start_time, end_time, conversation_id, talking_user, MAIN_USER_ID
    )

    for message in messages:
        writer.add_message(message)
    await writer.finalize()

    assert writer.execute(
        "select * from conversations where id=?;", (conversation_id,)
    ).fetchone() == (
        conversation_id,
        "individual",
        None,
        5,
        0,
        int(talking_user),
        start_time,
        end_time,
        0,
        None,
        2,
        0,
    )

    assert check_dog_rates(writer) == 5

    assert writer.execute(
        "select * from participants where conversation=? and participant=?;",
        (conversation_id, int(talking_user)),
    ).fetchone() == (
        int(talking_user),
        conversation_id,
        5,
        None,
        None,
        None,
    )

    assert writer.execute(
        "select * from participants where conversation=? and participant=?;",
        (conversation_id, MAIN_USER_ID),
    ).fetchone() == (
        MAIN_USER_ID,
        conversation_id,
        0,
        None,
        None,
        None,
    )
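check_dog_rates isn't defined in these excerpts; the assertions (5 here, 0 and 10 in later examples) treat its return value as the number of archived messages sent by the @dog_rates account. A sketch along these lines would satisfy them, though the table and column names are assumptions about the writer's schema.

def check_dog_rates(writer):
    # hypothetical query; only the expected counts are visible in the tests
    return writer.execute(
        "select count(*) from messages where sender=?;", (int(DOG_RATES),)
    ).fetchone()[0]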
Example #4
async def test_othersided_individual_conversation(writer: TwitterDataWriter):
    start_time = "2015-10-10T07:07:07.777Z"
    end_time = "2020-10-10T07:07:07.777Z"
    conversation_id = "you-talking"
    messages = generate_messages(
        8, start_time, end_time, conversation_id, MAIN_USER_ID, DOG_RATES
    )
    for message in messages:
        writer.add_message(message)
    await writer.finalize()

    assert writer.execute(
        "select * from conversations where id=?;", (conversation_id,)
    ).fetchone() == (
        conversation_id,
        "individual",
        None,
        8,
        8,
        DOG_RATES,
        start_time,
        end_time,
        1,
        None,
        2,
        0,
    )

    assert check_dog_rates(writer) == 0

    assert writer.execute(
        "select * from participants where conversation=? and participant=?;",
        (conversation_id, DOG_RATES),
    ).fetchone() == (
        DOG_RATES,
        conversation_id,
        0,
        None,
        None,
        None,
    )

    assert writer.execute(
        "select * from participants where conversation=? and participant=?;",
        (conversation_id, MAIN_USER_ID),
    ).fetchone() == (
        MAIN_USER_ID,
        conversation_id,
        8,
        None,
        None,
        None,
    )
Example #5
async def test_set_user_nicknotes(
    writer: TwitterDataWriter, reader: TwitterDataReader
):
    writer.add_message(
        generate_messages(1, random_2010s_datestring(), "", "whatevs", OBAMA)[0],
        True,
    )
    await writer.finalize()

    reader.set_user_nickname(OBAMA, "obama")
    assert reader.get_users_by_id((OBAMA,))[0].nickname == "obama"

    reader.set_user_notes(OBAMA, "was president")
    assert reader.get_users_by_id((OBAMA,), False)[0].notes == "was president"
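Each example receives writer and reader as pytest fixtures. Their definitions aren't shown, and the constructor signatures below are pure guesses; what the examples do establish is that both objects operate on the same archive, that the writer exposes add_message/finalize/execute, and that the reader is queried only after finalize. The async test functions additionally imply pytest-asyncio or an equivalent plugin.

import pytest

@pytest.fixture
def writer(tmp_path):
    # hypothetical signature: a fresh database path plus the archive owner's account id
    return TwitterDataWriter(tmp_path / "archive.db", MAIN_USER_ID)

@pytest.fixture
def reader(writer, tmp_path):
    # hypothetical: a reader opened over the database the writer fills in during the test
    return TwitterDataReader(tmp_path / "archive.db")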
Example #6
async def test_conversation_notes(
    writer: TwitterDataWriter, reader: TwitterDataReader
):
    writer.add_message(
        generate_messages(
            1, random_2000s_datestring(), "", "a-conversation", MAIN_USER_ID
        )[0],
        True,
    )
    await writer.finalize()

    reader.set_conversation_notes("a-conversation", "this is a conversation")

    assert (
        reader.get_conversation_by_id("a-conversation").notes
        == "this is a conversation"
    )
Example #7
async def test_text_search(writer: TwitterDataWriter, reader: TwitterDataReader):
    texts = ("i am home", "home i am", "being at home", "home is here")

    messages = generate_messages(
        20,
        random_2000s_datestring(),
        random_2010s_datestring(),
        "searchyconvo",
        MAIN_USER_ID,
        AMAZINGPHIL,
    )

    for i in range(len(texts)):
        messages[i]["text"] = texts[i]

    for message in messages:
        writer.add_message(message)
    await writer.finalize()

    results = reader.traverse_messages(after="beginning", search="home")["results"]
    assert len(results) == len(texts)
Example #8
async def test_conversations_by_time(
    writer: TwitterDataWriter, reader: TwitterDataReader
):

    page_length = DBRead.CONVERSATIONS_PER_PAGE

    group_ids = [get_random_text() for _ in range(page_length + 2)]
    group_messages = sum(
        [
            generate_messages(
                2,
                random_2000s_datestring(),
                random_2010s_datestring(),
                group_id,
                10101,
            )
            for group_id in group_ids
        ],
        [],
    )

    individual_ids = [get_random_text() for _ in range(page_length + 2)]
    individual_messages = sum(
        [
            generate_messages(
                2,
                random_2000s_datestring(),
                random_2010s_datestring(),
                individual_id,
                MAIN_USER_ID,
                next(unique_id),
            )
            for individual_id in individual_ids
        ],
        [],
    )

    messages = individual_messages + group_messages

    # since every conversation has two messages, every other message starting with
    # the first one is the first in a conversation, and every other message starting
    # with the second one is the last; the firsts will end up in ascending_ids and
    # the lasts in descending_ids
    ascending_ids = [
        m["conversationId"]
        for m in sorted(messages[::2], key=lambda x: x["createdAt"])
    ]
    descending_ids = [
        m["conversationId"]
        for m in sorted(messages[1::2], key=lambda x: x["createdAt"], reverse=True)
    ]

    # the same conversation ids should be in both lists; just sorted according to
    # different times
    assert set(ascending_ids) == set(descending_ids)

    for message in individual_messages:
        writer.add_message(message)
    for message in group_messages:
        writer.add_message(message, True)
    await writer.finalize()

    # check !group and !individual results:

    assert reader.get_conversations_by_time(1, group=False, individual=False) == []

    # check group + individual conversation results:

    asc_conversations_p1 = reader.get_conversations_by_time(1)
    assert ascending_ids[0:page_length] == [x.id for x in asc_conversations_p1]
    asc_conversations_p2 = reader.get_conversations_by_time(2)
    assert ascending_ids[page_length : page_length * 2] == [
        x.id for x in asc_conversations_p2
    ]

    dsc_conversations = reader.get_conversations_by_time(1, asc=False)
    assert [x.id for x in dsc_conversations] == descending_ids[0:page_length]

    # check individual conversation results:
    iac = reader.get_conversations_by_time(1, group=False)
    assert [i.id for i in iac] == [x for x in ascending_ids if x in individual_ids][
        0:page_length
    ]

    # check group conversation results:
    gdc = reader.get_conversations_by_time(2, asc=False, individual=False)
    assert [i.id for i in gdc] == [x for x in descending_ids if x in group_ids][
        page_length : page_length * 2
    ]
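next(unique_id) above implies a module-level iterator that hands out fresh account ids for throwaway conversation partners; any monotonically increasing counter that stays clear of the fixed test ids would do, for example:

import itertools

# hypothetical: keeps generated ids clear of MAIN_USER_ID, OBAMA, DOG_RATES, AMAZINGPHIL
unique_id = itertools.count(start=9_000_000_000)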
Example #9
async def test_normal_individual_conversation(connected_writer: TwitterDataWriter):
    users = (MAIN_USER_ID, DOG_RATES)
    user0_span = ("2020-01-01T10:00:00.100Z", "2020-01-10T10:00:00.100Z")
    user1_span = ("2020-01-02T10:00:00.100Z", "2020-01-09T10:00:00.100Z")
    conversation_id = "simple_conversation"
    side1 = generate_messages(
        10,
        user0_span[0],
        user0_span[1],
        conversation_id,
        users[0],
        users[1],
    )
    side2 = generate_messages(
        10,
        user1_span[0],
        user1_span[1],
        conversation_id,
        users[1],
        users[0],
    )
    for message in sorted(side1 + side2, key=lambda x: x["createdAt"]):
        connected_writer.add_message(message)

    await connected_writer.finalize()

    assert connected_writer.execute(
        "select * from conversations where id=?;", (conversation_id,)
    ).fetchone() == (
        conversation_id,
        "individual",
        None,
        20,
        10,
        int(users[1]),
        user0_span[0],
        user0_span[1],
        1,
        None,
        2,
        0,
    )

    assert check_dog_rates(connected_writer) == 10

    assert connected_writer.execute(
        "select * from participants where conversation=? and participant=?;",
        (conversation_id, int(users[0])),
    ).fetchone() == (
        int(users[0]),
        conversation_id,
        10,
        None,
        None,
        None,
    )

    assert connected_writer.execute(
        "select * from participants where conversation=? and participant=?;",
        (conversation_id, int(users[1])),
    ).fetchone() == (
        int(users[1]),
        conversation_id,
        10,
        None,
        None,
        None,
    )