Code example #1
File: test_lexer.py  Project: Luoyufu/pipenv
def test_valid_tokenizing():
    for token_type in valid_tokens:
        for source, expected_match in valid_tokens[token_type]:

            # The lexer must recognize a token at the start of every valid source string.
            token = _munch_a_token(source)
            assert token, "Failed to tokenize: {}\nExpected: {}\nOut of: {}\nGot nothing!".format(
                token_type, expected_match, source)

            # The recognized token must have the expected type...
            assert token.type == token_type, \
                "Expected type: {}\nOut of: {}\nThat matched: {}\nOf type: {}".format(
                    token_type, source, token.source_substring, token.type)
            # ...and must have matched exactly the expected substring.
            assert token.source_substring == expected_match
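
Both tests are table-driven: they iterate over token tables defined elsewhere in test_lexer.py rather than hard-coding cases. A minimal sketch of the shape the loop above assumes is shown next; the token-type names and sample entries are illustrative placeholders, not the project's actual tables.

# Hypothetical sketch of the table consumed by test_valid_tokenizing():
# a mapping from token type to (source_text, expected_matched_substring) pairs.
# "TYPE_INTEGER"/"TYPE_BOOLEAN" and the entries are made-up placeholders.
valid_tokens = {
    "TYPE_INTEGER": [
        ("42 = true", "42"),        # only the leading "42" should be consumed
        ("-17\n", "-17"),
    ],
    "TYPE_BOOLEAN": [
        ("true # comment", "true"),
    ],
}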
Code example #2
File: test_lexer.py  Project: Luoyufu/pipenv
def test_invalid_tokenizing():
    for token_type in invalid_tokens:
        for source, expected_match in invalid_tokens[token_type]:
            token = _munch_a_token(source)
            # The lexer may still produce some token, but it must not be a full
            # match of this token type against this input: that pairing is invalid.
            if token:
                assert not (token.type == token_type
                            and token.source_substring == expected_match)