def test_lexer_as_context_manager_does_not_restore_the_status_if_no_error():
    """A clean exit from the context manager must not rewind the lexer.

    Uses '3' (string) for the expected value, consistent with the other
    tests in this file; the original used the int 3, which Token
    stringifies anyway.
    """
    lexer = clex.CalcLexer()
    lexer.load('3 * 5')

    with lexer:
        assert lexer.get_token() == token.Token(clex.INTEGER, '3')

    # The token consumed inside the `with` block stays consumed: the next
    # token is the literal '*', not the integer again.
    assert lexer.get_token() == token.Token(clex.LITERAL, '*')
def test_get_tokens_understands_multidigit_integers():
    """Consecutive digits are grouped into a single INTEGER token."""
    lexer = clex.CalcLexer()
    lexer.load('356')

    expected = [
        token.Token(clex.INTEGER, '356'),
        token.Token(clex.EOL),
        token.Token(clex.EOF),
    ]
    assert lexer.get_tokens() == expected
def test_get_tokens_understands_floats():
    """A dotted number is emitted as one FLOAT token."""
    lexer = clex.CalcLexer()
    lexer.load('3.6')

    expected = [
        token.Token(clex.FLOAT, '3.6'),
        token.Token(clex.EOL),
        token.Token(clex.EOF),
    ]
    assert lexer.get_tokens() == expected
def test_get_tokens_understands_names_with_underscores():
    """Underscores are valid inside a NAME token."""
    lexer = clex.CalcLexer()
    lexer.load('some_var')

    expected = [
        token.Token(clex.NAME, 'some_var'),
        token.Token(clex.EOL),
        token.Token(clex.EOF),
    ]
    assert lexer.get_tokens() == expected
def test_get_tokens_understands_uppercase_letters():
    """Uppercase letters are accepted inside a NAME token."""
    lexer = clex.CalcLexer()
    lexer.load('SomeVar')

    expected = [
        token.Token(clex.NAME, 'SomeVar'),
        token.Token(clex.EOL),
        token.Token(clex.EOF),
    ]
    assert lexer.get_tokens() == expected
def test_get_tokens_ignores_spaces():
    """Whitespace separates tokens but produces no token itself."""
    lexer = clex.CalcLexer()
    lexer.load('3 + 5')

    expected = [
        token.Token(clex.INTEGER, '3'),
        token.Token(clex.LITERAL, '+'),
        token.Token(clex.INTEGER, '5'),
        token.Token(clex.EOL),
        token.Token(clex.EOF),
    ]
    assert lexer.get_tokens() == expected
def test_get_tokens_understands_multiplication():
    """'*' is emitted as a LITERAL token between the operands."""
    lexer = clex.CalcLexer()
    lexer.load('3 * 5')

    expected = [
        token.Token(clex.INTEGER, '3'),
        token.Token(clex.LITERAL, '*'),
        token.Token(clex.INTEGER, '5'),
        token.Token(clex.EOL),
        token.Token(clex.EOF),
    ]
    assert lexer.get_tokens() == expected
def test_get_tokens_understands_unspaced_sum_of_integers():
    """Tokens are recognised even without separating whitespace."""
    lexer = clex.CalcLexer()
    lexer.load('3+5')

    expected = [
        token.Token(clex.INTEGER, '3'),
        token.Token(clex.LITERAL, '+'),
        token.Token(clex.INTEGER, '5'),
        token.Token(clex.EOL),
        token.Token(clex.EOF),
    ]
    assert lexer.get_tokens() == expected
def test_get_tokens_understands_exponentiation():
    """'^' is emitted as a LITERAL token."""
    lexer = clex.CalcLexer()
    lexer.load('2 ^ 3')

    expected = [
        token.Token(clex.INTEGER, '2'),
        token.Token(clex.LITERAL, '^'),
        token.Token(clex.INTEGER, '3'),
        token.Token(clex.EOL),
        token.Token(clex.EOF),
    ]
    assert lexer.get_tokens() == expected
def test_get_tokens_understands_subtraction():
    """'-' is emitted as a LITERAL token between the operands."""
    lexer = clex.CalcLexer()
    lexer.load('3 - 5')

    expected = [
        token.Token(clex.INTEGER, '3'),
        token.Token(clex.LITERAL, '-'),
        token.Token(clex.INTEGER, '5'),
        token.Token(clex.EOL),
        token.Token(clex.EOF),
    ]
    assert lexer.get_tokens() == expected
def test_token_accepts_text_position():
    """A token stores the (line, column) position passed at creation."""
    line, column = 456, 123
    tok = token.Token('sometype', 'somevalue', position=(line, column))

    assert tok.type == 'sometype'
    assert tok.value == 'somevalue'
    assert tok.position == (line, column)
def test_lexer_can_stash_and_pop_status():
    """pop() rewinds the lexer to the position saved by stash()."""
    lexer = clex.CalcLexer()
    lexer.load('3 5')

    lexer.stash()
    lexer.get_token()
    lexer.pop()

    # After the pop the first token is readable again.
    assert lexer.get_token() == token.Token(clex.INTEGER, '3')
def test_lexer_as_context_manager_restores_the_status_if_token_error():
    """A TokenError raised inside the `with` block rewinds the lexer.

    Uses '3' (string) for the expected value, consistent with the other
    tests in this file; the original used the int 3, which Token
    stringifies anyway.
    """
    lexer = clex.CalcLexer()
    lexer.load('3 * 5')

    with lexer:
        lexer.get_token()
        lexer.get_token()
        raise clex.TokenError

    # The context manager swallowed TokenError and restored the stashed
    # status, so reading starts again from the first token.
    assert lexer.get_token() == token.Token(clex.INTEGER, '3')
def test_get_tokens_understands_parentheses():
    """'(' and ')' are each emitted as LITERAL tokens."""
    lexer = clex.CalcLexer()
    lexer.load('3 * (5 + 7)')

    expected = [
        token.Token(clex.INTEGER, '3'),
        token.Token(clex.LITERAL, '*'),
        token.Token(clex.LITERAL, '('),
        token.Token(clex.INTEGER, '5'),
        token.Token(clex.LITERAL, '+'),
        token.Token(clex.INTEGER, '7'),
        token.Token(clex.LITERAL, ')'),
        token.Token(clex.EOL),
        token.Token(clex.EOF),
    ]
    assert lexer.get_tokens() == expected
def test_get_tokens_understands_multiple_operations():
    """Several operators in one expression tokenize left to right."""
    lexer = clex.CalcLexer()
    lexer.load('3 + 5 - 7')

    expected = [
        token.Token(clex.INTEGER, '3'),
        token.Token(clex.LITERAL, '+'),
        token.Token(clex.INTEGER, '5'),
        token.Token(clex.LITERAL, '-'),
        token.Token(clex.INTEGER, '7'),
        token.Token(clex.EOL),
        token.Token(clex.EOF),
    ]
    assert lexer.get_tokens() == expected
def test_token_equality_accepts_none():
    """Comparing a Token to None via `==` works and is False.

    The original asserted `is not None`, which is trivially true for any
    freshly created object and never exercises Token.__eq__ with a None
    operand — the behaviour this test is named after.
    """
    tok = token.Token('sometype', 'somevalue')
    # Deliberately use `==` (not `is`) so Token.__eq__ handles None.
    assert (tok == None) is False  # noqa: E711
def test_token_string_representation_with_position():
    """str() of a positioned token includes line and column."""
    tok = token.Token('sometype', 'somevalue', position=(12, 34))
    expected = "Token(sometype, 'somevalue', line=12, col=34)"
    assert str(tok) == expected
def test_lexer_as_context_manager():
    """The lexer works normally when used as a context manager."""
    lexer = clex.CalcLexer()
    lexer.load('abcd')

    with lexer:
        assert lexer.get_token() == token.Token(clex.NAME, 'abcd')
def test_token_equality_ignores_position():
    """Position metadata plays no role in token equality (both ways)."""
    positioned = token.Token('sometype', 'somevalue', position=(12, 34))
    bare = token.Token('sometype', 'somevalue')

    assert positioned == bare
    assert bare == positioned
def test_token_length():
    """len() mirrors the value's length; a token is always truthy."""
    tok = token.Token('sometype', 'somevalue')

    assert bool(tok) is True
    assert len(tok) == len('somevalue')
def test_empty_token_has_length_zero():
    """A token without a value has zero length but is still truthy."""
    tok = token.Token('sometype')

    assert bool(tok) is True
    assert len(tok) == 0
def test_token_equality():
    """Two tokens with the same type and value compare equal."""
    first = token.Token('sometype', 'somevalue')
    second = token.Token('sometype', 'somevalue')
    assert first == second
def test_token_equality_with_none():
    """A token never compares equal to None."""
    nothing = None
    assert token.Token('sometype', 'somevalue') != nothing
def test_token_value_defaults_to_none():
    """Omitting the value argument leaves it as None."""
    tok = token.Token('sometype')

    assert tok.type == 'sometype'
    assert tok.value is None
def test_token_representation():
    """repr() shows the type unquoted and the value quoted."""
    expected = "Token(sometype, 'somevalue')"
    assert repr(token.Token('sometype', 'somevalue')) == expected
def test_token_accepts_type_and_value():
    """The constructor stores both the type and the value."""
    tok = token.Token('sometype', 'somevalue')
    assert (tok.type, tok.value) == ('sometype', 'somevalue')
def test_token_transforms_zero():
    """The integer 0 is stringified, not dropped as a falsy value."""
    tok = token.Token('sometype', 0)

    assert tok.type == 'sometype'
    assert tok.value == '0'
def test_token_keeps_value_none():
    """An explicit None value is preserved, not stringified."""
    tok = token.Token('sometype', None)

    assert tok.type == 'sometype'
    assert tok.value is None
def test_token_transforms_value_in_string():
    """Non-string values are converted to their string form."""
    tok = token.Token('sometype', 3)

    assert tok.type == 'sometype'
    assert tok.value == '3'
def test_get_tokens_understands_eof():
    """An empty source yields just the EOF token (no EOL)."""
    lexer = clex.CalcLexer()
    lexer.load('')
    assert lexer.get_tokens() == [token.Token(clex.EOF)]