예제 #1
0
def match(string, token):
    """Assert that *string* lexes to a single *token* followed by the end marker."""
    expected = [(token, string), ('ENDMARKER', ''), None]
    assert tokenize([string]) == expected
예제 #2
0
def test_sequence():
    """A name followed by an integer produces NAME then INT tokens."""
    tokens = tokenize(['a', '123'])
    assert tokens == [('NAME', 'a'), ('INT', '123'), ('ENDMARKER', ''), None]
예제 #3
0
def test_keywords():
    """Every keyword tokenizes to a token type of its own uppercased name."""
    for kw in KEYWORDS:
        assert tokenize([kw]) == [(kw.upper(), kw), ('ENDMARKER', ''), None]
예제 #4
0
def test_empty():
    """An empty input yields only the end marker."""
    result = tokenize([])
    assert result == [('ENDMARKER', ''), None]
예제 #5
0
def test_colon():
    """A lone colon tokenizes as a COLON token."""
    match(':', 'COLON')
    expected = [('COLON', ':'), ('ENDMARKER', ''), None]
    assert tokenize([':']) == expected
예제 #6
0
def match(string, token):
    """Check that tokenizing *string* yields exactly the given *token*."""
    result = tokenize([string])
    assert result == [(token, string), ('ENDMARKER', ''), None]
예제 #7
0
def test_keywords():
    """Keywords map to token types named after their uppercase form."""
    for keyword in KEYWORDS:
        expected = [(keyword.upper(), keyword), ('ENDMARKER', ''), None]
        assert tokenize([keyword]) == expected
예제 #8
0
def test_sequence():
    """Tokenizing a name and a number gives NAME, INT, then the end marker."""
    expected = [('NAME', 'a'), ('INT', '123'), ('ENDMARKER', ''), None]
    assert tokenize(['a', '123']) == expected
예제 #9
0
def test_colon():
    """A colon character produces a COLON token."""
    match(':', 'COLON')
    tokens = tokenize([':'])
    assert tokens == [('COLON', ':'), ('ENDMARKER', ''), None]
예제 #10
0
def test_empty():
    """No input tokens: only the end marker is emitted."""
    expected = [('ENDMARKER', ''), None]
    assert tokenize([]) == expected