def test_next_token5(self):
    """Lexer emits comparison operators, brackets, and the macro keyword."""
    source = '10 == 5; 3 != 8; [1, 2]; macro(x, y) { x + y };'
    expected = [
        (b1u3token.INT, '10'), (b1u3token.EQ, '=='), (b1u3token.INT, '5'),
        (b1u3token.SEMICOLON, ';'), (b1u3token.INT, '3'), (b1u3token.NOT_EQ, '!='),
        (b1u3token.INT, '8'), (b1u3token.SEMICOLON, ';'), (b1u3token.LBRACKET, '['),
        (b1u3token.INT, '1'), (b1u3token.COMMA, ','), (b1u3token.INT, '2'),
        (b1u3token.RBRACKET, ']'), (b1u3token.SEMICOLON, ';'), (b1u3token.MACRO, 'macro'),
        (b1u3token.LPAREN, '('), (b1u3token.IDENT, 'x'), (b1u3token.COMMA, ','),
        (b1u3token.IDENT, 'y'), (b1u3token.RPAREN, ')'), (b1u3token.LBRACE, '{'),
        (b1u3token.IDENT, 'x'), (b1u3token.PLUS, '+'), (b1u3token.IDENT, 'y'),
        (b1u3token.RBRACE, '}'), (b1u3token.SEMICOLON, ';'), (b1u3token.EOF, ''),
    ]
    lexer = b1u3token.Lexer(source)
    for i, (want_type, want_literal) in enumerate(expected):
        tok = lexer.next_token()
        self.assertEqual(
            want_type, tok.type,
            f'tests[{i}]: token type wrong expected {want_type}, got {tok.type}'
        )
        self.assertEqual(
            want_literal, tok.literal,
            f'tests[{i}]: literal wrong expected "{want_literal}", got "{tok.literal}"'
        )
def test_parse_infix_expressions(self):
    """Parser builds an InfixExpression for every binary operator.

    Each case is [source, left operand, operator literal, right operand];
    the operands may be integers or booleans.

    Fix: removed a stray, unterminated triple-quote that trailed the
    original method body — as written it opened a string with no closing
    delimiter in this method.
    """
    infix_tests = [
        ['5+5;', 5, '+', 5],
        ['5-5;', 5, '-', 5],
        ['5*5;', 5, '*', 5],
        ['5/5;', 5, '/', 5],
        ['5>5;', 5, '>', 5],
        ['5<5;', 5, '<', 5],
        ['5 == 5;', 5, '==', 5],
        ['5 != 5;', 5, '!=', 5],
        ['true == true', True, '==', True],
        ['true != false', True, '!=', False],
        ['false == false', False, '==', False],
    ]
    for tt in infix_tests:
        l = b1u3token.Lexer(tt[0])
        p = b1u3parser.Parser(l)
        program = p.parse_program()
        self.check_parser_errors(p)
        self.assertEqual(
            len(program.statements), 1,
            f'p.statements does not contain 1 statements. got={len(program.statements)}')
        stmt = program.statements[0]
        self.assertTrue(isinstance(stmt, b1u3ast.ExpressionStatement))
        # Delegate operand/operator checks to the shared helper.
        self.help_test_infix_expression(stmt.expression, tt[1], tt[2], tt[3])
def test_parsing_empty_hash_literal(self):
    """An empty hash literal '{}' parses to a node with zero pairs."""
    source = '{}'
    parser = b1u3parser.Parser(b1u3token.Lexer(source))
    program = parser.parse_program()
    self.check_parser_errors(parser)
    self.assertEqual(len(program.statements[0].expression.pairs), 0)
    self.assertEqual(
        len(program.statements), 1,
        f'p.statements does not contain 1 statements. got={len(program.statements)}')
def test_string_literal_expression(self):
    """A double-quoted string parses into a StringLiteral node."""
    source = '"hello world";'
    parser = b1u3parser.Parser(b1u3token.Lexer(source))
    program = parser.parse_program()
    self.check_parser_errors(parser)
    self.assertEqual(
        len(program.statements), 1,
        f'p.statements does not contain 1 statements. got={len(program.statements)}')
    literal = program.statements[0].expression
    self.assertTrue(isinstance(literal, b1u3ast.StringLiteral))
    self.assertEqual(literal.value, "hello world")
def test_parsing_index_expression(self):
    """'myArray[1+1]' parses to an IndexExpression with an infix index."""
    source = "myArray[1+1]"
    parser = b1u3parser.Parser(b1u3token.Lexer(source))
    program = parser.parse_program()
    self.check_parser_errors(parser)
    self.assertEqual(
        len(program.statements), 1,
        f'p.statements does not contain 1 statements. got={len(program.statements)}')
    index_exp = program.statements[0].expression
    self.assertTrue(isinstance(index_exp, b1u3ast.IndexExpression))
    self.help_test_identifier(index_exp.left, "myArray")
    self.help_test_infix_expression(index_exp.index, 1, "+", 1)
def test_identifier_expression(self):
    """A bare identifier parses into a single expression statement."""
    source = 'foobar;'
    parser = b1u3parser.Parser(b1u3token.Lexer(source))
    program = parser.parse_program()
    self.check_parser_errors(parser)
    self.assertEqual(
        len(program.statements), 1,
        f'p.statements does not contain 1 statements. got={len(program.statements)}')
    exp = program.statements[0].expression
    self.assertEqual(exp.value, 'foobar', f'exp.value is not foobar, got={exp.value}')
    self.assertEqual(
        exp.token_literal(), 'foobar',
        f'exp.token_literal() is not foobar, got={exp.token_literal()}')
def test_integer_literal_expression(self):
    """'5;' parses into an IntegerLiteral with value 5."""
    source = '5;'
    parser = b1u3parser.Parser(b1u3token.Lexer(source))
    program = parser.parse_program()
    self.check_parser_errors(parser)
    self.assertEqual(
        len(program.statements), 1,
        f'p.statements does not contain 1 statements. got={len(program.statements)}')
    literal = program.statements[0].expression
    self.assertTrue(isinstance(literal, b1u3ast.IntegerLiteral))
    self.assertEqual(literal.value, 5, f'literal.value is not 5, got={literal.value}')
    self.assertEqual(
        literal.token_literal(), '5',
        f'literal.token_literal() is not "5", got={literal.token_literal()}')
def test_parse_prefix_expression(self):
    """'!' and '-' produce PrefixExpression nodes over their operand."""
    cases = [
        ['!5;', '!', 5],
        ['-15;', '-', 15],
        ['!true;', '!', True],
        ['!false;', '!', False],
    ]
    for case in cases:
        parser = b1u3parser.Parser(b1u3token.Lexer(case[0]))
        program = parser.parse_program()
        self.check_parser_errors(parser)
        self.assertEqual(
            len(program.statements), 1,
            f'p.statements does not contain 1 statements. got={len(program.statements)}')
        stmt = program.statements[0]
        self.assertTrue(isinstance(stmt, b1u3ast.ExpressionStatement))
        exp = stmt.expression
        self.assertTrue(isinstance(exp, b1u3ast.PrefixExpression))
        self.assertEqual(
            exp.operator, case[1],
            f'exp.operator is not {case[1]}, got={exp.operator}')
        self.help_test_literal_expression(exp.right, case[2])
def test_parsing_hash_literals_with_expression(self):
    """Hash values may be arbitrary expressions; keys are matched by repr()."""
    source = '{"one": 0 + 1, "two": 10 - 8, "three": 15/5}'
    parser = b1u3parser.Parser(b1u3token.Lexer(source))
    program = parser.parse_program()
    self.check_parser_errors(parser)
    self.assertEqual(len(program.statements), 1)
    hash_literal = program.statements[0].expression
    # Each checker validates the infix expression stored under its key.
    checkers = {
        "one": lambda e: self.help_test_infix_expression(e, 0, '+', 1),
        "two": lambda e: self.help_test_infix_expression(e, 10, '-', 8),
        "three": lambda e: self.help_test_infix_expression(e, 15, '/', 5),
    }
    for key in hash_literal.pairs:
        checkers[repr(key)](hash_literal.pairs[key])
def test_parsing_hash_liteals(self):
    """A hash literal with string keys and integer values parses fully.

    Fix: the per-key ``isinstance(k, b1u3ast.StringLiteral)`` check was
    computed but its result discarded, so a non-string key could never
    fail the test; it is now wrapped in ``assertTrue``.
    """
    source = '{"one": 1, "two": 2, "three": 3}'
    l = b1u3token.Lexer(source)
    p = b1u3parser.Parser(l)
    program = p.parse_program()
    self.check_parser_errors(p)
    self.assertEqual(
        len(program.statements), 1,
        f'p.statements does not contain 1 statements. got={len(program.statements)}')
    hashobj = program.statements[0].expression
    self.assertTrue(isinstance(hashobj, b1u3ast.HashLiteral))
    self.assertEqual(
        len(hashobj.pairs), 3,
        f'hashobj.pairs is not 3, got={len(hashobj.pairs)}')
    expected = {'one': 1, 'two': 2, 'three': 3}
    for k in hashobj.pairs:
        # Keys must be string literals; look up the expected value by repr.
        self.assertTrue(isinstance(k, b1u3ast.StringLiteral))
        self.help_test_integer_literal(hashobj.pairs[k], expected[repr(k)])
def test_parsing_array_literals(self):
    """'[1, 2*2, 3 + 3]' parses to an ArrayLiteral of three expressions."""
    source = "[1, 2*2, 3 + 3]"
    parser = b1u3parser.Parser(b1u3token.Lexer(source))
    program = parser.parse_program()
    self.check_parser_errors(parser)
    self.assertEqual(
        len(program.statements), 1,
        f'p.statements does not contain 1 statements. got={len(program.statements)}')
    stmt = program.statements[0]
    self.assertTrue(isinstance(stmt, b1u3ast.ExpressionStatement))
    array = stmt.expression
    self.assertTrue(isinstance(array, b1u3ast.ArrayLiteral), 'exp is not ArrayLiteral')
    self.assertEqual(
        len(array.elements), 3,
        f"exp doesn't have 3 elements, got={len(array.elements)}")
    self.help_test_integer_literal(array.elements[0], 1)
    self.help_test_infix_expression(array.elements[1], 2, '*', 2)
    self.help_test_infix_expression(array.elements[2], 3, '+', 3)
def test_call_expression_parsing(self):
    """A call with mixed literal/infix arguments parses to a CallExpression."""
    source = "add(1, 2+3, 4*5);"
    parser = b1u3parser.Parser(b1u3token.Lexer(source))
    program = parser.parse_program()
    self.check_parser_errors(parser)
    self.assertEqual(
        len(program.statements), 1,
        f'p.statements does not contain 1 statements. got={len(program.statements)}')
    call = program.statements[0].expression
    self.assertTrue(
        isinstance(call, b1u3ast.CallExpression),
        f'stmt.Expression is not ast.CallExpression. got={type(call)}')
    self.help_test_identifier(call.function, 'add')
    self.assertEqual(
        len(call.arguments), 3,
        f'wrong length of arguments. want=3, got={len(call.arguments)}')
    self.help_test_literal_expression(call.arguments[0], 1)
    self.help_test_infix_expression(call.arguments[1], 2, '+', 3)
    self.help_test_infix_expression(call.arguments[2], 4, '*', 5)
def test_function_literal_parsing(self):
    """'fn(x, y) { x+y }' parses to a FunctionLiteral with two parameters
    and a one-statement body.

    Fix: the failure message on the FunctionLiteral check wrongly said
    "is not ast.ExpressionStatement" (copied from an earlier assertion);
    it now names the type actually being checked.
    """
    input = "fn(x, y) { x+y }"
    l = b1u3token.Lexer(input)
    p = b1u3parser.Parser(l)
    program = p.parse_program()
    self.check_parser_errors(p)
    self.assertEqual(
        len(program.statements), 1,
        f'p.statements does not contain 1 statements. got={len(program.statements)}')
    stmt = program.statements[0].expression
    self.assertTrue(
        isinstance(stmt, b1u3ast.FunctionLiteral),
        f'stmt.expression is not ast.FunctionLiteral. got={type(stmt)}')
    self.assertEqual(
        len(stmt.parameters), 2,
        f'len(stmt.parameters) is not 2, got={len(stmt.parameters)}')
    self.help_test_literal_expression(stmt.parameters[0], 'x')
    self.help_test_literal_expression(stmt.parameters[1], 'y')
    self.assertEqual(
        len(stmt.body.statements), 1,
        f'len(stmt.body.statements) is not 1, got={len(stmt.body.statements)}')
    self.assertTrue(
        isinstance(stmt.body.statements[0], b1u3ast.ExpressionStatement),
        f'stmt.body.statements[0] is not ExpressionStatement instance, got={type(stmt.body.statements[0])}')
    self.help_test_infix_expression(stmt.body.statements[0].expression, 'x', '+', 'y')
def test_boolean_expression(self):
    """'true;false;' parses into two Boolean nodes with matching values.

    Fixes: the first failure message wrongly said "is not 5" (copied from
    the integer-literal test), and the second interpolated the *negated*
    value (``got={not literal.value}``), which would report True when the
    actual value was True. Both messages now describe the real value.
    """
    input = 'true;false;'
    l = b1u3token.Lexer(input)
    p = b1u3parser.Parser(l)
    program = p.parse_program()
    self.check_parser_errors(p)
    self.assertEqual(
        len(program.statements), 2,
        f'p.statements does not contain 2 statements. got={len(program.statements)}')
    # First statement: true
    literal = program.statements[0].expression
    self.assertTrue(isinstance(literal, b1u3ast.Boolean))
    self.assertTrue(literal.value, f'literal.value is not True, got={literal.value}')
    # Second statement: false
    literal = program.statements[1].expression
    self.assertTrue(isinstance(literal, b1u3ast.Boolean))
    self.assertTrue(not literal.value, f'literal.value is not False, got={literal.value}')
def test_macro_literal_parsing(self):
    """'macro(x, y) { x + y; }' parses to a MacroLiteral node."""
    source = 'macro(x, y) { x + y; }'
    parser = b1u3parser.Parser(b1u3token.Lexer(source))
    program = parser.parse_program()
    self.check_parser_errors(parser)
    self.assertEqual(len(program.statements), 1)
    macro = program.statements[0].expression
    self.assertTrue(isinstance(macro, b1u3ast.MacroLiteral))
    self.assertEqual(len(macro.parameters), 2, 'length of exp.parameters is not 2')
    self.help_test_literal_expression(macro.parameters[0], 'x')
    self.help_test_literal_expression(macro.parameters[1], 'y')
    self.assertEqual(len(macro.body.statements), 1)
    body_stmt = macro.body.statements[0]
    self.assertTrue(isinstance(body_stmt, b1u3ast.ExpressionStatement))
    self.help_test_infix_expression(body_stmt.expression, 'x', '+', 'y')
def test_next_token4(self):
    """Lexer recognizes the keywords if/return/true/false/else."""
    source = 'if return true false else'
    expected = [
        (b1u3token.IF, 'if'), (b1u3token.RETURN, 'return'),
        (b1u3token.TRUE, 'true'), (b1u3token.FALSE, 'false'),
        (b1u3token.ELSE, 'else'), (b1u3token.EOF, ''),
    ]
    lexer = b1u3token.Lexer(source)
    for i, (want_type, want_literal) in enumerate(expected):
        tok = lexer.next_token()
        self.assertEqual(
            want_type, tok.type,
            f'tests[{i}]: token type wrong expected {want_type}, got {tok.type}'
        )
        self.assertEqual(
            want_literal, tok.literal,
            f'tests[{i}]: literal wrong expected "{want_literal}", got "{tok.literal}"'
        )
def test_return_statement(self):
    """Every 'return <expr>;' line parses to a ReturnStatement node."""
    source = """
    return 5;
    return 10;
    return 993322;
    """
    parser = b1u3parser.Parser(b1u3token.Lexer(source))
    program = parser.parse_program()
    self.check_parser_errors(parser)
    self.assertEqual(
        len(program.statements), 3,
        f'p.statements does not contain 3 statements. got={len(program.statements)}')
    for statement in program.statements:
        self.assertTrue(isinstance(statement, b1u3ast.ReturnStatement), 's is not ReturnStatement')
        self.assertEqual(
            statement.token_literal(), 'return',
            f"s is not 'return', got={statement.token_literal()}")
def test_next_token6(self):
    """Lexer handles string literals (including spaces) and hash syntax."""
    source = '"foobar" "foo bar" {"A": "B"}'
    expected = [
        (b1u3token.STRING, "foobar"), (b1u3token.STRING, "foo bar"),
        (b1u3token.LBRACE, "{"), (b1u3token.STRING, "A"),
        (b1u3token.COLON, ":"), (b1u3token.STRING, "B"),
        (b1u3token.RBRACE, "}"), (b1u3token.EOF, ''),
    ]
    lexer = b1u3token.Lexer(source)
    for i, (want_type, want_literal) in enumerate(expected):
        tok = lexer.next_token()
        self.assertEqual(
            want_type, tok.type,
            f'tests[{i}]: token type wrong expected {want_type}, got {tok.type}'
        )
        self.assertEqual(
            want_literal, tok.literal,
            f'tests[{i}]: literal wrong expected "{want_literal}", got "{tok.literal}"'
        )
def test_let_statements(self):
    """'let <name> = <value>;' binds identifier and value correctly."""
    cases = [
        ["let x = 5;", "x", 5],
        ["let y = true;", "y", True],
        ["let foobar = y;", "foobar", "y"],
    ]
    for case in cases:
        parser = b1u3parser.Parser(b1u3token.Lexer(case[0]))
        program = parser.parse_program()
        self.check_parser_errors(parser)
        self.assertEqual(
            len(program.statements), 1,
            f"Program doesn't contain 1 statements. got={len(program.statements)}")
        stmt = program.statements[0]
        self.assertTrue(
            isinstance(stmt, b1u3ast.LetStatement),
            f'stmt is not LetStatement instance, got={type(stmt)}')
        self.help_let_statement_test(stmt, case[1])
        self.help_test_literal_expression(stmt.value, case[2])
def test_next_token1(self):
    """Lexer tokenizes the basic single-character punctuation set."""
    source = '=+(){},;'
    expected = [
        (b1u3token.ASSIGN, '='), (b1u3token.PLUS, '+'),
        (b1u3token.LPAREN, '('), (b1u3token.RPAREN, ')'),
        (b1u3token.LBRACE, '{'), (b1u3token.RBRACE, '}'),
        (b1u3token.COMMA, ','), (b1u3token.SEMICOLON, ';'),
        (b1u3token.EOF, ''),
    ]
    lexer = b1u3token.Lexer(source)
    for i, (want_type, want_literal) in enumerate(expected):
        tok = lexer.next_token()
        self.assertEqual(
            want_type, tok.type,
            f'tests[{i}]: token type wrong expected {want_type}, got {tok.type}'
        )
        self.assertEqual(
            want_literal, tok.literal,
            f'tests[{i}]: literal wrong expected "{want_literal}", got "{tok.literal}"'
        )
def test_if_expression(self):
    """'if (x < y) { x }' parses with a condition, one-statement
    consequence, and no alternative branch."""
    source = 'if (x < y) { x }'
    parser = b1u3parser.Parser(b1u3token.Lexer(source))
    program = parser.parse_program()
    self.check_parser_errors(parser)
    self.assertEqual(
        len(program.statements), 1,
        f'p.statements does not contain 1 statements. got={len(program.statements)}')
    stmt = program.statements[0]
    self.assertTrue(
        isinstance(stmt, b1u3ast.ExpressionStatement),
        f'stmt.Expression is not ast.ExpressionStatement. got={type(stmt)}')
    exp = stmt.expression
    self.assertTrue(
        isinstance(exp, b1u3ast.IfExpression),
        f'stmt.Expression is not ast.IfExpression. got={type(stmt)}')
    self.help_test_infix_expression(exp.condition, 'x', '<', 'y')
    self.assertEqual(
        len(exp.consequence.statements), 1,
        f'consequence is not 1 statements. got={len(exp.consequence.statements)}')
    self.assertTrue(
        isinstance(exp.consequence.statements[0], b1u3ast.ExpressionStatement),
        f'exp.consequence.statements[0] is not ast.ExpressionStatement. got={type(exp.consequence.statements[0])}')
    consequence = exp.consequence.statements[0]
    self.help_test_identifier(consequence.expression, 'x')
    # A truthy alternative means an unexpected else-branch was parsed.
    if exp.alternative:
        self.fail('consequence.alternative is not None')
def test_function_parameter_parsing(self):
    """Function literals accept zero, one, or several parameters."""
    cases = [
        ['fn() {};', []],
        ['fn(x) {};', ['x']],
        ['fn(x, y, z) {};', ['x', 'y', 'z']],
    ]
    for source, want_params in cases:
        parser = b1u3parser.Parser(b1u3token.Lexer(source))
        program = parser.parse_program()
        self.check_parser_errors(parser)
        self.assertEqual(
            len(program.statements), 1,
            f'p.statements does not contain 1 statements. got={len(program.statements)}')
        stmt = program.statements[0]
        self.assertTrue(isinstance(stmt, b1u3ast.ExpressionStatement), f'stmt is not ExpressionStatement')
        function = stmt.expression
        self.assertTrue(isinstance(function, b1u3ast.FunctionLiteral))
        self.assertEqual(
            len(function.parameters), len(want_params),
            f'length parameter wrong, want {len(want_params)}, got={len(function.parameters)}')
        for i, name in enumerate(want_params):
            self.help_test_literal_expression(function.parameters[i], name)
def test_next_token3(self):
    """Lexer tokenizes arithmetic operators, comparisons, and integers."""
    source = '!-/3*;5 < 100 > 6;'
    expected = [
        (b1u3token.BANG, '!'), (b1u3token.MINUS, '-'),
        (b1u3token.SLASH, '/'), (b1u3token.INT, '3'),
        (b1u3token.ASTERISK, '*'), (b1u3token.SEMICOLON, ';'),
        (b1u3token.INT, '5'), (b1u3token.LT, '<'),
        (b1u3token.INT, '100'), (b1u3token.GT, '>'),
        (b1u3token.INT, '6'), (b1u3token.SEMICOLON, ';'),
        (b1u3token.EOF, ''),
    ]
    lexer = b1u3token.Lexer(source)
    for i, (want_type, want_literal) in enumerate(expected):
        tok = lexer.next_token()
        self.assertEqual(
            want_type, tok.type,
            f'tests[{i}]: token type wrong expected {want_type}, got {tok.type}'
        )
        self.assertEqual(
            want_literal, tok.literal,
            f'tests[{i}]: literal wrong expected "{want_literal}", got "{tok.literal}"'
        )
def test_defines_macro(self):
    """define_macros removes macro definitions from the program and
    registers them in the environment; non-macro lets are left in place.

    Fixes: the failure message said "is not 3" while the assertion checks
    for 2 (the macro statement is stripped, leaving the two lets); dead
    commented-out env lookups were removed.
    """
    input = """
    let number = 1;
    let function = fn(x, y) { x + y };
    let mymacro = macro(x, y) { x + y };
    """
    env = b1u3object.Environment()
    l = b1u3token.Lexer(input)
    p = b1u3parser.Parser(l)
    program = p.parse_program()
    b1u3evaluator.define_macros(program, env)
    self.assertEqual(
        len(program.statements), 2,
        f'len(program.statements) is not 2, got={len(program.statements)}')
    obj = env["mymacro"]
    self.assertTrue(isinstance(obj, b1u3object.Macro))
    self.assertEqual(len(obj.parameters), 2)
    self.assertEqual(repr(obj.parameters[0]), 'x')
    self.assertEqual(repr(obj.parameters[1]), 'y')
    self.assertEqual(repr(obj.body), '{ (x + y) }')
def test_next_token2(self):
    """Lexer handles a realistic multi-statement program: let bindings,
    a function literal, and a call expression."""
    source = """let five = 5;
    let ten = 10;
    let add = fn (x, y) { x + y; };
    let result = add(five, ten);"""
    expected = [
        (b1u3token.LET, 'let'), (b1u3token.IDENT, 'five'),
        (b1u3token.ASSIGN, '='), (b1u3token.INT, '5'), (b1u3token.SEMICOLON, ';'),
        (b1u3token.LET, 'let'), (b1u3token.IDENT, 'ten'),
        (b1u3token.ASSIGN, '='), (b1u3token.INT, '10'), (b1u3token.SEMICOLON, ';'),
        (b1u3token.LET, 'let'), (b1u3token.IDENT, 'add'), (b1u3token.ASSIGN, '='),
        (b1u3token.FUNCTION, 'fn'), (b1u3token.LPAREN, '('),
        (b1u3token.IDENT, 'x'), (b1u3token.COMMA, ','), (b1u3token.IDENT, 'y'),
        (b1u3token.RPAREN, ')'), (b1u3token.LBRACE, '{'),
        (b1u3token.IDENT, 'x'), (b1u3token.PLUS, '+'), (b1u3token.IDENT, 'y'),
        (b1u3token.SEMICOLON, ';'), (b1u3token.RBRACE, '}'), (b1u3token.SEMICOLON, ';'),
        (b1u3token.LET, 'let'), (b1u3token.IDENT, 'result'), (b1u3token.ASSIGN, '='),
        (b1u3token.IDENT, 'add'), (b1u3token.LPAREN, '('),
        (b1u3token.IDENT, 'five'), (b1u3token.COMMA, ','), (b1u3token.IDENT, 'ten'),
        (b1u3token.RPAREN, ')'), (b1u3token.SEMICOLON, ';'),
        (b1u3token.EOF, ''),
    ]
    lexer = b1u3token.Lexer(source)
    for i, (want_type, want_literal) in enumerate(expected):
        tok = lexer.next_token()
        self.assertEqual(
            want_type, tok.type,
            f'tests[{i}]: token type wrong expected {want_type}, got {tok.type}'
        )
        self.assertEqual(
            want_literal, tok.literal,
            f'tests[{i}]: literal wrong expected "{want_literal}", got "{tok.literal}"'
        )
def test_operator_precedence_parsing(self):
    """repr(program) shows fully-parenthesized grouping, proving operator
    precedence and grouping parentheses are honored."""
    cases = [
        ('true', 'true'),
        ('false', 'false'),
        ('3 > 5 == false', '((3 > 5) == false)'),
        ('3 < 5 == true', '((3 < 5) == true)'),
        ('1 + (2 + 3) + 4', '((1 + (2 + 3)) + 4)'),
        ('(5 + 5) + 2', '((5 + 5) + 2)'),
        ('2 / (5 + 5)', '(2 / (5 + 5))'),
        ('-(5 + 5)', '(-(5 + 5))'),
        ('!(true == true)', '(!(true == true))'),
        ('a + add(b * c) + d', '((a + add((b * c))) + d)'),
        ('add(a, b, 1, 2 * 3, 4 + 5, add(6, 7 * 8))',
         'add(a, b, 1, (2 * 3), (4 + 5), add(6, (7 * 8)))'),
        ('add(a + b + c * d / f + g)',
         'add((((a + b) + ((c * d) / f)) + g))'),
        ('a * [1, 2, 3, 4][b * c] * d',
         '((a * ([ 1, 2, 3, 4 ][(b * c)])) * d)'),
        ('add(a * b[2], b[1], 2 * [1, 2][1])',
         'add((a * (b[2])), (b[1]), (2 * ([ 1, 2 ][1])))'),
    ]
    for source, want in cases:
        parser = b1u3parser.Parser(b1u3token.Lexer(source))
        program = parser.parse_program()
        self.check_parser_errors(parser)
        actual = repr(program)
        self.assertEqual(actual, want, f'expected={want}, got={actual}')
def help_test_parse_program(self, s):
    """Lex and parse *s*, returning the resulting program node."""
    lexer = b1u3token.Lexer(s)
    parser = b1u3parser.Parser(lexer)
    return parser.parse_program()
def help_test_eval(self, input: str):
    """Lex, parse, and evaluate *input* in a fresh environment,
    returning the evaluator's result object."""
    lexer = b1u3token.Lexer(input)
    parser = b1u3parser.Parser(lexer)
    env = b1u3object.Environment()
    return b1u3evaluator.b1u3eval(parser.parse_program(), env)