def test_print():
    """A `print` ID followed by text parses to Print + String nodes."""
    stream = [
        tokens.ID('print'),
        tokens.WS(' '),
        tokens.ID('butt'),
        tokens.WS('\n'),
    ]
    expected = ast.Module(ast.Print(), ast.String(' butt'))
    assert parse(stream) == expected
def test_print_escape_newlines():
    """A literal backslash-n in the print argument becomes a real newline."""
    stream = [
        tokens.ID('print'),
        tokens.WS(' '),
        tokens.ID('butt\\n'),
        tokens.WS('\n'),
    ]
    expected = ast.Module(ast.Print(), ast.String(' butt\n'))
    assert parse(stream) == expected
def test_lex_print():
    """Lexing a print line alternates ID and WS tokens, ending with the newline."""
    expected_tokens = [
        tokens.ID('print'),
        tokens.WS(' '),
        tokens.ID('butt'),
        tokens.WS(' '),
        tokens.ID('fart'),
        tokens.WS('\n'),
    ]
    assert lex('print butt fart\n') == expected_tokens
def parse(lexemes):
    """Parse a sequence of lexemes into an ast.Module.

    Recognized forms:
      - an ID 'hello' lexeme appends an ast.Hello() node;
      - an ID 'print' lexeme appends ast.Print() plus an ast.String()
        whose text is pulled from the stream by print_arg (which consumes
        further lexemes from the same iterator).
    All other lexemes (whitespace, unknown IDs) are ignored.

    Fix: the original rebuilt the entire Module via
    type(result)(*(result.datas + (...))) on every matched lexeme — an
    accidental O(n**2) accumulation. We now gather child nodes in a list
    and construct the Module once at the end; the result is identical.
    """
    # TODO-TEST: other ID's (eg no) do nothing
    lexemes = iter(lexemes)  # iterator shared with print_arg so it can consume ahead
    parts = []
    for lexeme in lexemes:
        if lexeme == tokens.ID('hello'):
            parts.append(ast.Hello())
        elif lexeme == tokens.ID('print'):
            # print_arg advances the same iterator to collect the argument text
            string_arg = print_arg(lexemes)
            parts.append(ast.Print())
            parts.append(ast.String(string_arg))
    return ast.Module(*parts)
def test_parse_hello():
    """A lone 'hello' ID parses to a Module holding one Hello node."""
    result = parse([tokens.ID('hello')])
    assert result == ast.Module(ast.Hello())
def test_parse_hello_with_whitespace():
    """Surrounding whitespace lexemes are ignored by the parser."""
    stream = [
        tokens.WS('\n\n '),
        tokens.ID('hello'),
        tokens.WS('\n\t'),
    ]
    assert parse(stream) == ast.Module(ast.Hello())
def test_lex_hello():
    """A bare word lexes to a single ID token."""
    result = lex('hello')
    assert result == [tokens.ID('hello')]
def test_lex_hello_whitespace():
    """Leading and trailing whitespace lex to their own WS tokens."""
    expected_tokens = [
        tokens.WS('\n\n '),
        tokens.ID('hello'),
        tokens.WS('\n\t'),
    ]
    assert lex('\n\n hello\n\t') == expected_tokens