Example #1
class Parser:
    def __init__(self, source):
        self.lexer = Lexer(source)
        self.source = source
        self.start_of_object_pos = 0

    def expect(self, expected_token_type):
        # Consume and return the current token, failing with UNEXPECTED_TOKEN
        # if its type does not match the expected one.
        if self.lexer.current_token.type != expected_token_type:
            self.error(error_code=ErrorCode.UNEXPECTED_TOKEN,
                       expected=expected_token_type)
        prev_token = self.lexer.current_token
        self.lexer.build_next_token()
        return prev_token

    def expect_not_none(self, tested_object, error_description=''):
        # Guard against a sub-parser returning None where a result is required.
        if tested_object is None:
            self.error(error_code=ErrorCode.EXPECTED_NOT_NONE,
                       description=error_description)
        return tested_object

    def parse_program(self):
        toplevel_objects = []

        while (parsed_object := self.try_to_parse_fun_definition()) or \
                (parsed_object := self.try_to_parse_statement()):
            # assumed loop body: the original excerpt ends at the loop header
            toplevel_objects.append(parsed_object)
        return toplevel_objects
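expect() is the parser's single point of token consumption: it checks the current token's type, reports UNEXPECTED_TOKEN on a mismatch, and otherwise advances the lexer and hands back the consumed token. A hypothetical parse method built on these helpers might look like this (try_to_parse_assignment, try_to_parse_expression, and the Assignment node are illustrative names, not taken from the source):

    def try_to_parse_assignment(self):
        # Illustrative sketch of the expect()/expect_not_none() pattern.
        if self.lexer.current_token.type != TokenType.ID:
            return None
        name_token = self.expect(TokenType.ID)
        self.expect(TokenType.ASSIGN)
        value = self.expect_not_none(
            self.try_to_parse_expression(),   # hypothetical sub-parser
            error_description='expected an expression after "="')
        self.expect(TokenType.SEMI)
        return Assignment(name_token.value, value)  # hypothetical AST node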
Example #2
    def test_double_char_operators(self):
        string = '<= > <= < < >= = < > >= < <= <= < >= <= >= >= != > >='
        tokens = [
            Token(TokenType.LEQ),
            Token(TokenType.GRE),
            Token(TokenType.LEQ),
            Token(TokenType.LESS),
            Token(TokenType.LESS),
            Token(TokenType.GEQ),
            Token(TokenType.ASSIGN),
            Token(TokenType.LESS),
            Token(TokenType.GRE),
            Token(TokenType.GEQ),
            Token(TokenType.LESS),
            Token(TokenType.LEQ),
            Token(TokenType.LEQ),
            Token(TokenType.LESS),
            Token(TokenType.GEQ),
            Token(TokenType.LEQ),
            Token(TokenType.GEQ),
            Token(TokenType.GEQ),
            Token(TokenType.NEQ),
            Token(TokenType.GRE),
            Token(TokenType.GEQ)
        ]
        lexer = Lexer(StringSource(string))

        for expected_token in tokens:
            token = lexer.current_token
            self.assertEqual(expected_token.type, token.type)
            self.assertEqual(expected_token.value, token.value)
            lexer.build_next_token()
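Every test in this suite drives the lexer through the same two-member interface: current_token holds the token most recently built, and build_next_token() advances the stream by one token. StringSource is the in-memory counterpart of FileSource; a minimal sketch of what it plausibly provides (the method and attribute names here are assumptions, not from the project):

class StringSource:
    # Illustrative sketch: feeds the lexer one character at a time and
    # yields an end-of-text sentinel once the input is exhausted.
    ETX = '\x03'

    def __init__(self, text):
        self._text = text
        self._index = 0

    def next_char(self):  # hypothetical method name
        if self._index >= len(self._text):
            return self.ETX
        char = self._text[self._index]
        self._index += 1
        return char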
Example #3
    def test_trivial(self):
        file_source = FileSource('tokens/positions1.txt')
        lexer = Lexer(file_source)

        # positions within line
        positions = [1, 2, 3, 5, 9, 11, 14, 16]

        for expected_position in positions:
            token = lexer.current_token
            self.assertEqual(expected_position, token.position.column)
            lexer.build_next_token()
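The position assertions require two pieces of bookkeeping: the source must track the line and column of every character it hands out, and the lexer must stamp the position of a token's first character onto the token. A sketch of a position tracker consistent with these tests' 1-based columns (the class and method names are assumptions):

class Position:
    # Illustrative: advance() is called once per character the source yields,
    # so the first character on each line lands in column 1.
    def __init__(self, line=1, column=0):
        self.line = line
        self.column = column

    def advance(self, char):
        if char == '\n':
            self.line += 1
            self.column = 0
        else:
            self.column += 1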
Example #4
    def test_all_tokens(self):

        tokens = [Token(t) for t in RESERVED_KEYWORDS.values()] + [
            Token(TokenType.ID, 'a'),
            Token(TokenType.ID, 'aaa'),
            Token(TokenType.ID, 'a123'),
            Token(TokenType.ID, 'a_'),
            Token(TokenType.ID, 'a_123'),
            Token(TokenType.ID, 'abc_def_123gh'),
            Token(TokenType.SEMI),
            Token(TokenType.COMMA),
            Token(TokenType.COLON),
            Token(TokenType.PLUS),
            Token(TokenType.MINUS),
            Token(TokenType.MUL),
            Token(TokenType.FLOAT_DIV),
            Token(TokenType.ASSIGN),
            Token(TokenType.LPAREN),
            Token(TokenType.RPAREN),
            Token(TokenType.LBRACK),
            Token(TokenType.RBRACK),
            Token(TokenType.LCURB),
            Token(TokenType.RCURB),
            Token(TokenType.LESS),
            Token(TokenType.GRE),
            Token(TokenType.LEQ),
            Token(TokenType.GEQ),
            Token(TokenType.EQ),
            Token(TokenType.NEQ),
            Token(TokenType.POW),
            Token(TokenType.SCALAR, 0),
            Token(TokenType.SCALAR, 12),
            Token(TokenType.SCALAR, 12.345),
            Token(TokenType.SCALAR, 12.345),
            Token(TokenType.SCALAR, float('12.345e6')),
            Token(TokenType.SCALAR, float('12.345e-6')),
            Token(TokenType.SCALAR, 0),
            Token(TokenType.SCALAR, 0.01),
            Token(TokenType.SCALAR, float('0.001e2')),
            Token(TokenType.SCALAR, float('0.0001e-2')),
            Token(TokenType.ETX)
        ]

        file_source = FileSource('tokens/all_tokens.txt')
        lexer = Lexer(file_source)
        for expected_token in tokens:
            token = lexer.current_token
            self.assertEqual(expected_token.type, token.type)
            self.assertEqual(expected_token.value, token.value)
            lexer.build_next_token()
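Generating the keyword tokens from RESERVED_KEYWORDS keeps the test in sync with the language definition: adding a keyword to the table automatically extends the expected-token list. The table presumably maps each keyword's lexeme to its dedicated token type, along these lines (the entries are illustrative, not the project's actual keywords):

RESERVED_KEYWORDS = {
    'if': TokenType.IF,        # illustrative entries only
    'else': TokenType.ELSE,
    'while': TokenType.WHILE,
    'ret': TokenType.RET,
}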
Example #5
    def test_real_life_problems(self):
        file_source = FileSource('tokens/positions3.txt')
        lexer = Lexer(file_source)

        positions = [
            (2, 1), (2, 5), (2, 20), (2, 21), (2, 22),
            (3, 5), (3, 13), (3, 15), (3, 31), (3, 33), (3, 49), (3, 51), (3, 52),
            (4, 1)
        ]

        for expected_position in positions:
            token = lexer.current_token
            self.assertEqual(expected_position[0], token.position.line)
            self.assertEqual(expected_position[1], token.position.column)
            lexer.build_next_token()
Example #6
    def test_etx_on_comment_line(self):
        string = 'not_comment = 1; # a comment'
        lexer = Lexer(StringSource(string))

        tokens = [
            Token(TokenType.ID, 'not_comment'),
            Token(TokenType.ASSIGN),
            Token(TokenType.SCALAR, 1),
            Token(TokenType.SEMI),
            Token(TokenType.ETX)
        ]

        for expected_token in tokens:
            token = lexer.current_token
            self.assertEqual(expected_token.type, token.type)
            self.assertEqual(expected_token.value, token.value)
            lexer.build_next_token()
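The comment yields no token at all: on seeing '#', the lexer discards characters up to the end of the line or end of input, and only then builds the next token, which here is ETX. A sketch of that skip loop, assuming the one-character source interface sketched above:

    def _skip_comment(self):
        # Hypothetical helper: consume from '#' up to newline or end-of-text.
        while self.current_char not in ('\n', StringSource.ETX):
            self.current_char = self.source.next_char()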
Example #7
    def test_positions_on_multiple_lines(self):
        file_source = FileSource('tokens/positions2.txt')
        lexer = Lexer(file_source)

        positions = [
            (2, 1), (2, 3), (2, 5), (2, 8),
            (3, 1), (3, 3), (3, 5), (3, 7), (3, 8), (3, 9),
            (4, 1),
            (5, 5), (5, 6),
            (6, 10), (6, 12),
            (7, 7), (7, 9),
            (8, 1), (8, 4), (8, 6), (8, 10), (8, 18), (8, 19), (8, 21)
        ]

        for expected_position in positions:
            token = lexer.current_token
            self.assertEqual(expected_position[0], token.position.line)
            self.assertEqual(expected_position[1], token.position.column)
            lexer.build_next_token()
Example #8
    def test_etx_in_unfinished_string(self):
        string = '"some random string'
        # Construction stays inside assertRaises too, since the lexer may
        # raise while building the very first token.
        with self.assertRaises(LexerError):
            lexer = Lexer(StringSource(string))
            while lexer.current_token.type != TokenType.ETX:
                lexer.build_next_token()
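Since the opening quote is never closed, the lexer reaches end-of-text while still inside the string literal and must raise rather than emit a truncated token. A sketch of the check it presumably performs while building a string token (illustrative; TokenType.STRING and the error message are assumptions):

    def _build_string_token(self):
        # Hypothetical: read until the closing quote, failing on end-of-text.
        chars = []
        self.current_char = self.source.next_char()  # step past the opening '"'
        while self.current_char != '"':
            if self.current_char == StringSource.ETX:
                raise LexerError('unterminated string literal at end of text')
            chars.append(self.current_char)
            self.current_char = self.source.next_char()
        self.current_char = self.source.next_char()  # step past the closing '"'
        return Token(TokenType.STRING, ''.join(chars))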