Code example #1
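The interpreter's entry point: command-line arguments choose a source file, and the program either dumps its AST or interprets it.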
def main():
    args = parseArguments()
    source = FileSource(args['filename'])
    if args['dump']:
        dumpAST(source)
        return

    Interpreter(source).interpret()
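parseArguments is not defined in this excerpt. A minimal sketch of what it could look like with argparse, assuming a positional filename and a --dump flag to match the dict keys used above:

import argparse

def parseArguments():
    # Hypothetical sketch; the real parseArguments is not shown in these examples.
    parser = argparse.ArgumentParser()
    parser.add_argument('filename', help='path to the source file to run')
    parser.add_argument('--dump', action='store_true',
                        help='dump the AST instead of interpreting')
    # vars() returns a plain dict, matching the args['filename'] access in main()
    return vars(parser.parse_args())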
Code example #2
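A lexer test checking the column reported for each token on a single line of the fixture file.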
    def test_trivial(self):
        file_source = FileSource('tokens/positions1.txt')
        lexer = Lexer(file_source)

        # expected start column of each token within the line
        positions = [1, 2, 3, 5, 9, 11, 14, 16]

        for expected_position in positions:
            token = lexer.current_token
            self.assertEqual(expected_position, token.position.column)
            lexer.build_next_token()
Code example #3
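A lexer test that walks the full token inventory: reserved keywords, identifiers, punctuation and operators, scalar literals in several notations, and the end-of-text marker.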
    def test_all_tokens(self):

        tokens = [Token(t) for t in RESERVED_KEYWORDS.values()] + [
            Token(TokenType.ID, 'a'),
            Token(TokenType.ID, 'aaa'),
            Token(TokenType.ID, 'a123'),
            Token(TokenType.ID, 'a_'),
            Token(TokenType.ID, 'a_123'),
            Token(TokenType.ID, 'abc_def_123gh'),
            Token(TokenType.SEMI),
            Token(TokenType.COMMA),
            Token(TokenType.COLON),
            Token(TokenType.PLUS),
            Token(TokenType.MINUS),
            Token(TokenType.MUL),
            Token(TokenType.FLOAT_DIV),
            Token(TokenType.ASSIGN),
            Token(TokenType.LPAREN),
            Token(TokenType.RPAREN),
            Token(TokenType.LBRACK),
            Token(TokenType.RBRACK),
            Token(TokenType.LCURB),
            Token(TokenType.RCURB),
            Token(TokenType.LESS),
            Token(TokenType.GRE),
            Token(TokenType.LEQ),
            Token(TokenType.GEQ),
            Token(TokenType.EQ),
            Token(TokenType.NEQ),
            Token(TokenType.POW),
            Token(TokenType.SCALAR, 0),
            Token(TokenType.SCALAR, 12),
            Token(TokenType.SCALAR, 12.345),
            Token(TokenType.SCALAR, 12.345),
            Token(TokenType.SCALAR, 12.345e6),
            Token(TokenType.SCALAR, 12.345e-6),
            Token(TokenType.SCALAR, 0),
            Token(TokenType.SCALAR, 0.01),
            Token(TokenType.SCALAR, 0.001e2),
            Token(TokenType.SCALAR, 0.0001e-2),
            Token(TokenType.ETX)
        ]

        file_source = FileSource('tokens/all_tokens.txt')
        lexer = Lexer(file_source)
        for expected_token in tokens:
            token = lexer.current_token
            self.assertEqual(expected_token.type, token.type)
            self.assertEqual(expected_token.value, token.value)
            lexer.build_next_token()
Code example #4
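A token-position test against a fixture spanning several lines, asserting both the line and the column of each token.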
    def test_real_life_problems(self):
        file_source = FileSource('tokens/positions3.txt')
        lexer = Lexer(file_source)

        # expected (line, column) at which each token starts
        positions = [
            (2, 1), (2, 5), (2, 20), (2, 21), (2, 22),
            (3, 5), (3, 13), (3, 15), (3, 31), (3, 33), (3, 49), (3, 51), (3, 52),
            (4, 1)
        ]

        for expected_position in positions:
            token = lexer.current_token
            self.assertEqual(expected_position[0], token.position.line)
            self.assertEqual(expected_position[1], token.position.column)
            lexer.build_next_token()
Code example #5
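Another multi-line position test, with tokens starting at varying columns across lines 2 through 8 of the fixture.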
    def test_positions_on_multiple_lines(self):
        file_source = FileSource('tokens/positions2.txt')
        lexer = Lexer(file_source)

        # expected (line, column) at which each token starts
        positions = [
            (2, 1), (2, 3), (2, 5), (2, 8),
            (3, 1), (3, 3), (3, 5), (3, 7), (3, 8), (3, 9),
            (4, 1),
            (5, 5), (5, 6),
            (6, 10), (6, 12),
            (7, 7), (7, 9),
            (8, 1), (8, 4), (8, 6), (8, 10), (8, 18), (8, 19), (8, 21)
        ]

        for expected_position in positions:
            token = lexer.current_token
            self.assertEqual(expected_position[0], token.position.line)
            self.assertEqual(expected_position[1], token.position.column)
            lexer.build_next_token()
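Examples #4 and #5 drive the lexer with an identical loop. A sketch of a helper they could share, assuming only the Lexer API visible above (this helper does not appear in the original code):

def assert_token_positions(test_case, lexer, positions):
    # Hypothetical helper: walk the token stream and compare each token's
    # start (line, column) against the expected pair.
    for expected_line, expected_column in positions:
        token = lexer.current_token
        test_case.assertEqual(expected_line, token.position.line)
        test_case.assertEqual(expected_column, token.position.column)
        lexer.build_next_token()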
Code example #6
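A smoke test that runs an example program end to end through the interpreter, with an AST dump left commented out.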
    def test_something(self):
        # dumpAST(FileSource('../../examples/perceptron.txt'))
        Interpreter(FileSource(
            '../../examples/print_override_test.txt')).interpret().to_py()
Code example #7
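A parser test: build a program AST from a source file and hand it to the AST dumper.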
    def test_filesource(self):
        filesource = FileSource('test.txt')
        parser = Parser(filesource)
        program = parser.parse_program()
        ast_dumper = AstDumper()
        ast_dumper.add_child(lambda: ast_dumper.visit(program), str(program))