Exemple #1
0
def test_etag_cache_decorator_works(mocker, requests_mock):
    """A repeated subjects() call must be answered from the ETag cache once."""
    mock_subjects_with_cache(requests_mock)
    client = Client("arbitrary_api_key")
    mocker.spy(client, "_fetch_result_from_cache")

    client.subjects()  # first request populates the cache
    client.subjects()  # identical request should be served from the cache

    assert client._fetch_result_from_cache.call_count == 1
Exemple #2
0
def test_parameters_convert_datetime_to_string_correctly(requests_mock):
    """A datetime argument must be serialized as an ISO-8601 query value."""
    mock_subjects(requests_mock)
    client = Client("arbitrary_api_key")
    timestamp = datetime.datetime.now()

    client.subjects(updated_after=timestamp)

    expected_url = (
        "https://api.wanikani.com/v2/subjects?updated_after="
        + timestamp.isoformat()
    )
    assert requests_mock.call_count == 1
    assert requests_mock.request_history[0].url == expected_url
Exemple #3
0
def test_subject_parameters_are_properly_converted(requests_mock):
    """Lists and booleans must be flattened into the expected query string."""
    mock_subjects(requests_mock)
    client = Client("arbitrary_api_key")

    client.subjects(ids=[1, 2, 3], hidden=False, slugs=["abc", "123"])

    expected_url = (
        "https://api.wanikani.com/v2/subjects?hidden=false&ids=1,2,3&slugs=abc,123"
    )
    assert requests_mock.call_count == 1
    assert requests_mock.request_history[0].url == expected_url
Exemple #4
0
def add_subject_ids():
    """Reconcile remote WaniKani vocabulary subjects with local records.

    Fetches every non-hidden vocabulary subject from the WaniKani v2 API,
    matches each one to a local ``Vocabulary`` row via its kanji characters
    (``get_vocab_by_kanji``), stores the remote subject id on the local
    record, and reconciles the local data against the remote subject.

    Returns:
        tuple: ``(unmatched, no_local_equivalent)`` where ``unmatched`` is a
        queryset of local ``Vocabulary`` rows still carrying
        ``wk_subject_id=0`` and ``no_local_equivalent`` is a list of remote
        subjects that matched no local vocabulary.
    """
    from wanikani_api.client import Client
    from kw_webapp.tasks import get_vocab_by_kanji

    # SECURITY: hard-coded API key — should come from settings or an
    # environment variable, not source control.
    client = Client("2510f001-fe9e-414c-ba19-ccf79af40060")
    subjects = client.subjects(fetch_all=True,
                               types="vocabulary",
                               hidden=False)
    total_subs = len(subjects)
    match_count = 0
    no_local_equivalent = []
    for subject in subjects:
        try:
            local_vocabulary = get_vocab_by_kanji(subject.characters)
            local_vocabulary.wk_subject_id = subject.id
            local_vocabulary.reconcile(subject)
            match_count += 1
            logger.info(
                f"{match_count} / {total_subs}:\t {subject.characters}")
        except Vocabulary.DoesNotExist:
            # logger.warn() is a deprecated alias of logger.warning().
            logger.warning(
                f"Found no local vocabulary with characters: {subject.characters}"
            )
            no_local_equivalent.append(subject)

    unmatched = Vocabulary.objects.filter(wk_subject_id=0)
    return unmatched, no_local_equivalent
Exemple #5
0
def test_real_connection_to_subjects():
    """Live-API smoke test: at least one subject of a known type comes back."""
    subjects = Client(api_key).subjects()
    page_data = subjects.current_page.data
    assert len(page_data) > 0
    assert page_data[0].resource in ("vocabulary", "kanji", "radical")
Exemple #6
0
def test_client_correctly_renders_empty_collections(requests_mock):
    """An empty API payload must render as an empty current page."""
    mock_empty_subjects(requests_mock)
    client = Client("arbitrary_api_key")

    response = client.subjects(ids=[1, 2, 3], hidden=False, slugs=["abc", "123"])

    assert len(response.current_page.data) == 0
Exemple #7
0
def get_vocab_df(api_key, sync_vocab=False, type="kanji"):
    """Get vocab dataframe.

    Example:
        subject_id        meanings characters  readings        pos  srs_stage
    0        2467           [One]          一      [いち]  [numeral]          8
    1        2468     [One Thing]         一つ     [ひとつ]  [numeral]          8
    2        2469         [Seven]          七  [なな, しち]  [numeral]          8
    3        2470  [Seven Things]         七つ     [ななつ]  [numeral]          8
    4        2471          [Nine]          九  [きゅう, く]  [numeral]          8

    Args:
        api_key (str): wanikani api key
        sync_vocab (bool, optional): Fetch and cache vocab. Defaults to False.
        type (str, optional): Subject type to fetch, e.g. "kanji" or
            "vocabulary". Defaults to "kanji". NOTE: shadows the ``type``
            builtin; name kept for backward compatibility with keyword
            callers.

    Returns:
        [pandas df]: See example.
    """
    # Subjects are cached as parquet under the external wanikani directory.
    wkpath = jpl.get_dir(jpl.external_dir() / "wanikani")
    client = Client(api_key)
    if sync_vocab:
        # Re-fetch every subject of the requested type and rebuild the cache.
        all_vocabulary = client.subjects(types=type, fetch_all=True)
        rows = []
        for vocab in all_vocabulary:
            # Only vocabulary subjects carry parts of speech.
            pos = vocab.parts_of_speech if type == "vocabulary" else ""
            rows.append([
                vocab.id,
                [m.meaning for m in vocab.meanings],
                vocab.characters,
                [r.reading for r in vocab.readings],
                pos,
            ])
        vocab_csv = pd.DataFrame(
            rows,
            columns=[
                "subject_id", "meanings", "characters", "readings", "pos"
            ],
        )
        vocab_csv["srs_stage"] = 0
        vocab_csv.to_parquet(
            wkpath / f"{type}.parquet",
            index=False,  # to_parquet expects a bool, not 0
            compression="gzip",
        )
    else:
        vocab_csv = pd.read_parquet(wkpath / f"{type}.parquet")

    # Overlay the current SRS stage from the user's assignments.
    assignments = client.assignments(subject_types=type, fetch_all=True)
    for assignment in assignments:
        vocab_csv.loc[vocab_csv.subject_id == assignment.subject_id,
                      "srs_stage"] = assignment.srs_stage

    return vocab_csv
Exemple #8
0
def test_client_can_get_subjects(requests_mock):
    """The client must parse the mocked payload into known resource types."""
    mock_subjects(requests_mock)
    client = Client("v2_api_key")

    page = client.subjects().current_page
    assert len(page.data) > 0
    assert page.data[0].resource in ("vocabulary", "kanji", "radical")
Exemple #9
0
def test_requests_mock(requests_mock):
    """subjects() must return an iterator over result pages."""
    mock_subjects(requests_mock)
    assert isinstance(Client("whatever").subjects(), Iterator)