Example 1
def test_english_mnemonics(entropy, expected_mnemonic, expected_seed):
    m = Mnemonic("english")
    mnemonic = m.to_mnemonic(bytes.fromhex(entropy))
    assert m.is_mnemonic_valid(mnemonic)
    assert mnemonic == expected_mnemonic

    seed = Mnemonic.to_seed(mnemonic, passphrase="TREZOR")
    assert seed.hex() == expected_seed
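
For orientation, a minimal sketch of the kind of vector this test is parametrized with, assuming Mnemonic is imported as in Example 7: the first English vector from the upstream BIP39 test set maps sixteen zero bytes of entropy to the well-known "abandon ... about" phrase.

# Illustration only, not part of the suite's parametrization.
m = Mnemonic("english")
assert m.to_mnemonic(bytes(16)) == (
    "abandon abandon abandon abandon abandon abandon "
    "abandon abandon abandon abandon abandon about"
)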
Example 2
def test_expand(lang):
    m = Mnemonic(lang)
    # Generates a random set of words, so will never be the same set of words
    words = m.generate()
    for word in words.split(" "):  # Space delineates in languages not excluded above
        # BIP39 can support word expansion with as little as 4 characters
        norm_word = normalize_string(word)
        for size in range(4, len(norm_word)):
            assert m.expand(norm_word[:size]) == word
Example 3
def test_expand_word():
    m = Mnemonic("english")
    assert "" == m.expand_word("")
    assert " " == m.expand_word(" ")
    assert "access" == m.expand_word("access")  # word in list
    assert "access" == m.expand_word("acce")  # unique prefix expanded to word in list
    assert "acb" == m.expand_word("acb")  # not found at all
    assert "acc" == m.expand_word("acc")  # multi-prefix match
    assert "act" == m.expand_word("act")  # exact three letter match
    assert "action" == m.expand_word("acti")  # unique prefix expanded to word in list
Example 4
def test_compatibility(seed, language, account_path):
    mnemonic = Mnemonic(language).to_mnemonic(seed)
    acct = Account.from_mnemonic(mnemonic, account_path=account_path)
    # NOTE: Must do `cd tests/integration/ethers-cli && npm install -g .` first
    ethers_cli = subprocess.run(
        ['ethers-cli', '-m', mnemonic, '-l', language, '-p', account_path],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    if ethers_cli.stderr:
        raise IOError(ethers_cli.stderr.decode("utf-8"))
    ethers_address = ethers_cli.stdout.decode("utf-8").strip()
    assert acct.address == ethers_address
Example 5
def test_generation(lang, num_words):
    m = Mnemonic(lang)
    mnemonic = m.generate(num_words)
    assert m.is_mnemonic_valid(mnemonic)
    # NOTE: Sometimes traditional Chinese can return characters that are also valid
    # simplified Chinese characters. In that scenario, the detection algorithm will
    # assume simplified.
    if lang == "chinese_traditional":
        assert "chinese" in Mnemonic.detect_language(mnemonic)
    else:
        assert Mnemonic.detect_language(mnemonic) == lang
    assert len(Mnemonic.to_seed(mnemonic)) == 64
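
The 64-byte length asserted above follows from the BIP39 seed derivation itself: PBKDF2-HMAC-SHA512 over the NFKD-normalized mnemonic, salted with "mnemonic" plus the passphrase, for 2048 rounds, whose output is one SHA-512 block. A minimal sketch per the spec (not necessarily the library's internals):

import hashlib
import unicodedata

def bip39_seed_sketch(mnemonic, passphrase=""):
    # NFKD-normalize both inputs, then 2048 rounds of PBKDF2-HMAC-SHA512.
    mnemonic = unicodedata.normalize("NFKD", mnemonic)
    salt = "mnemonic" + unicodedata.normalize("NFKD", passphrase)
    return hashlib.pbkdf2_hmac("sha512", mnemonic.encode(), salt.encode(), 2048)

assert len(bip39_seed_sketch("any phrase, valid or not, yields 64 bytes")) == 64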
Example 6
def test_japanese_mnemonics(entropy, expected_mnemonic, passphrase, expected_seed):
    m = Mnemonic("japanese")
    mnemonic = m.to_mnemonic(bytes.fromhex(entropy))
    assert m.is_mnemonic_valid(mnemonic)
    # NOTE: For some reason, the strings weren't appearing in normalized form as
    #       copied from the BIP39 test vectors
    assert normalize_string(mnemonic) == normalize_string(expected_mnemonic)

    seed = Mnemonic.to_seed(mnemonic, passphrase)
    assert seed.hex() == expected_seed

    # Check this because we had to normalize the string for unicode artifacts
    seed = Mnemonic.to_seed(expected_mnemonic, passphrase)
    assert seed.hex() == expected_seed
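
The normalization caveat above is a plain Unicode effect: pasted Japanese vectors often carry precomposed kana (NFC), while BIP39 comparisons expect NFKD, where a voiced kana splits into its base character plus a combining mark. A small illustration:

import unicodedata

composed = "\u304c"                                   # voiced kana as one precomposed code point
decomposed = unicodedata.normalize("NFKD", composed)  # base kana + combining dakuten
assert composed != decomposed and len(decomposed) == 2
assert unicodedata.normalize("NFKD", composed) == unicodedata.normalize("NFKD", decomposed)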
Example 7
from hypothesis import (
    given,
    settings,
    strategies as st,
)
import pytest
import subprocess

from wan_account import (
    Account,
)
from wan_account.hdaccount.mnemonic import (
    VALID_ENTROPY_SIZES,
    Mnemonic,
)

Account.enable_unaudited_hdwallet_features()

language_st = st.sampled_from(Mnemonic.list_languages())

seed_st = st.binary(min_size=min(VALID_ENTROPY_SIZES), max_size=max(VALID_ENTROPY_SIZES)) \
    .filter(lambda x: len(x) in VALID_ENTROPY_SIZES) \
    .filter(lambda s: int.from_bytes(s, byteorder="big") != 0)

node_st = st.tuples(st.integers(min_value=0, max_value=2**31 - 1),
                    st.booleans())
path_st = st.lists(node_st, min_size=0, max_size=10) \
    .map(lambda nodes: list(str(n[0]) + ('' if n[1] else "'") for n in nodes)) \
    .map(lambda nodes: 'm' + ('/' + '/'.join(nodes) if nodes else ''))


@given(seed=seed_st, language=language_st, account_path=path_st)
@settings(deadline=1000)
@pytest.mark.compatibility
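
For readability, a worked example of the derivation-path strings path_st produces: each node is an index, and per the mapping above a False flag appends the hardened marker (apostrophe). Illustration only, not part of the test module.

nodes = [(44, False), (60, False), (0, True)]
path = "m" + "/" + "/".join(str(i) + ("" if flag else "'") for i, flag in nodes)
assert path == "m/44'/60'/0"
# An empty node list maps to the bare root path "m".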
Example 8
def test_undetected_language():
    with pytest.raises(ValidationError):
        Mnemonic.detect_language("xxxxxxx")
    with pytest.raises(ValidationError):
        Mnemonic("xxxxxxx")
Example 9
def test_detection(language, word):
    assert language == Mnemonic.detect_language(word)
Example 10
def test_failed_checksum():
    mnemo = Mnemonic("english")
    assert not mnemo.is_mnemonic_valid(
        "bless cloud wheel regular tiny venue bird web grief security dignity zoo"
    )
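
The phrase above is built from valid English words but still fails, because the last word also carries a checksum: BIP39 appends ENT/32 bits of SHA-256(entropy) before splitting into 11-bit word indices, so an arbitrary word combination almost never lines up. A sketch of just that arithmetic:

import hashlib

def checksum_bits_sketch(entropy: bytes) -> str:
    # BIP39 appends ENT/32 checksum bits taken from SHA-256(entropy).
    cs_len = len(entropy) * 8 // 32
    bits = bin(int.from_bytes(hashlib.sha256(entropy).digest(), "big"))[2:].zfill(256)
    return bits[:cs_len]

# 16 bytes of entropy -> 4 checksum bits -> 132 bits -> 12 words of 11 bits each.
assert len(checksum_bits_sketch(bytes(16))) == 4
assert (16 * 8 + 4) // 11 == 12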
Example 11

def test_expand_word():
    m = Mnemonic("english")
    assert "" == m.expand_word("")
    assert " " == m.expand_word(" ")
    assert "access" == m.expand_word("access")  # word in list
    assert "access" == m.expand_word("acce")  # unique prefix expanded to word in list
    assert "acb" == m.expand_word("acb")  # not found at all
    assert "acc" == m.expand_word("acc")  # multi-prefix match
    assert "act" == m.expand_word("act")  # exact three letter match
    assert "action" == m.expand_word("acti")  # unique prefix expanded to word in list


@pytest.mark.parametrize("lang", set(
    lang for lang in Mnemonic.list_languages() if lang not in (
        # These languages can't support word expansion
        "japanese",
        "korean",
        "chinese_simplified",
        "chinese_traditional",
    )
))
def test_expand(lang):
    m = Mnemonic(lang)
    # Generates a random set of words, so will never be the same set of words
    words = m.generate()
    for word in words.split(" "):  # Space delineates in languages not excluded above
        # BIP39 can support word expansion with as little as 4 characters
        norm_word = normalize_string(word)
        for size in range(4, len(norm_word)):