Example #1
class TestLexer(unittest.TestCase):
    def setUp(self):
        self.lex = Lexer("Test/Examples/gcd.oats")
        self.expected_tokens = [
                Token("special", "func", 1),
                Token("word", "gcd", 1),
                Token("special", "(", 1),
                Token("word", "x", 1), Token("special", ",", 1), Token("word", "y", 1), Token("special", ")", 1), Token("special", "{", 1),
                ]

    def tearDown(self):
        self.lex.f.close()

    def test_next_token(self):
        ct = self.lex.next_token()
        def checkEquivalent(token1, token2):
            self.assertEqual(token1.type_, token2.type_)
            self.assertEqual(token1.lexeme, token2.lexeme)
        for k in range(len(self.expected_tokens)):
            checkEquivalent(self.expected_tokens[k], ct)
            ct = self.lex.next_token()

    def test_prev_token(self):
        ct = self.lex.next_token()
        def checkEquivalent(token1, token2):
            self.assertEqual(token1.type_, token2.type_)
            self.assertEqual(token1.lexeme, token2.lexeme)
        for k in range(len(self.expected_tokens)):
            ct = self.lex.next_token()
        for k in reversed(range(len(self.expected_tokens))):
            ct = self.lex.prev_token()
            checkEquivalent(self.expected_tokens[k], ct)
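
A note on the interface these assertions rely on: only type_, lexeme, and a line number passed as the third constructor argument are exercised. A minimal Token consistent with that usage might look like the sketch below (hypothetical; the project's real class may carry more state).

class Token:
    """Minimal token record: a kind tag, the matched text, and its line."""

    def __init__(self, type_, lexeme, line):
        self.type_ = type_      # e.g. "special" or "word", as in the tests
        self.lexeme = lexeme    # the raw source text of the token
        self.line = line        # 1-based line number where it was found

    def __repr__(self):
        return f"Token({self.type_!r}, {self.lexeme!r}, {self.line})"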
Example #2
    def test_double_char_operators(self):
        string = '<= > <= < < >= = < > >= < <= <= < >= <= >= >= != > >='
        tokens = [
            Token(TokenType.LEQ),
            Token(TokenType.GRE),
            Token(TokenType.LEQ),
            Token(TokenType.LESS),
            Token(TokenType.LESS),
            Token(TokenType.GEQ),
            Token(TokenType.ASSIGN),
            Token(TokenType.LESS),
            Token(TokenType.GRE),
            Token(TokenType.GEQ),
            Token(TokenType.LESS),
            Token(TokenType.LEQ),
            Token(TokenType.LEQ),
            Token(TokenType.LESS),
            Token(TokenType.GEQ),
            Token(TokenType.LEQ),
            Token(TokenType.GEQ),
            Token(TokenType.GEQ),
            Token(TokenType.NEQ),
            Token(TokenType.GRE),
            Token(TokenType.GEQ)
        ]
        lexer = Lexer(StringSource(string))

        for expected_token in tokens:
            token = lexer.current_token
            self.assertEqual(expected_token.type, token.type)
            self.assertEqual(expected_token.value, token.value)
            lexer.build_next_token()
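
These expectations are what maximal munch gives you: try the two-character operator first, then fall back to the single character. A self-contained sketch of that rule (the token names mirror the test's TokenType members; the function itself is hypothetical):

def lex_comparison(text, i):
    """Return (token_name, next_index) for the operator starting at text[i],
    preferring the two-character form when it matches (maximal munch)."""
    two_char = {'<=': 'LEQ', '>=': 'GEQ', '!=': 'NEQ', '==': 'EQ'}
    one_char = {'<': 'LESS', '>': 'GRE', '=': 'ASSIGN'}
    pair = text[i:i + 2]
    if pair in two_char:
        return two_char[pair], i + 2
    return one_char[text[i]], i + 1   # a KeyError here would mean "unknown operator"

# lex_comparison('<= >', 0) -> ('LEQ', 2); lex_comparison('<= >', 3) -> ('GRE', 4)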
Example #3
class Parser:
    def __init__(self, source):
        self.lexer = Lexer(source)
        self.source = source
        self.start_of_object_pos = 0

    def expect(self, expected_token_type):
        if self.lexer.current_token.type != expected_token_type:
            self.error(error_code=ErrorCode.UNEXPECTED_TOKEN,
                       expected=expected_token_type)
        prev_token = self.lexer.current_token
        self.lexer.build_next_token()
        return prev_token

    def expect_not_none(self, tested_object, error_description=''):
        if tested_object is None:
            self.error(error_code=ErrorCode.EXPECTED_NOT_NONE,
                       description=error_description)
        return tested_object

    def parse_program(self):
        toplevel_objects = []

        while (parsed_object := self.try_to_parse_fun_definition()) or\
                (parsed_object := self.try_to_parse_statement()):
Example #4
 def setUp(self):
     self.lex = Lexer("Test/Examples/gcd.oats")
     self.expected_tokens = [
             Token("special", "func", 1),
             Token("word", "gcd", 1),
             Token("special", "(", 1),
             Token("word", "x", 1), Token("special", ",", 1), Token("word", "y", 1), Token("special", ")", 1), Token("special", "{", 1),
             ]
Example #5
    def test_trivial(self):
        file_source = FileSource('tokens/positions1.txt')
        lexer = Lexer(file_source)

        # positions within line
        positions = [1, 2, 3, 5, 9, 11, 14, 16]

        for expected_position in positions:
            token = lexer.current_token
            self.assertEqual(expected_position, token.position.column)
            lexer.build_next_token()
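
These assertions read token.position.column (and, in the later position tests, token.position.line), which implies per-character bookkeeping with 1-based counters that reset on every newline. A small sketch of that idea (hypothetical helper, not the project's actual source class):

class Position:
    """Tracks the 1-based line and column of the next character to be read."""

    def __init__(self, line=1, column=1):
        self.line = line
        self.column = column

    def advance(self, char):
        """Consume one character, resetting the column after a newline."""
        if char == '\n':
            self.line += 1
            self.column = 1
        else:
            self.column += 1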
Example #6
    def test_scalar_starts_with_zeros(self):
        string = '0'

        # should pass
        try:
            Lexer(StringSource(string))
        except LexerError:
            self.fail()

        string += '0'
        # should fail
        with self.assertRaises(LexerError):
            Lexer(StringSource(string))
Example #7
    def test_too_long_id(self):
        id = ''.join(['a'] * 128)

        # should pass
        try:
            Lexer(StringSource(id))
        except LexerError:
            self.fail()

        # should fail
        id += 'a'
        with self.assertRaises(LexerError) as e:
            Lexer(StringSource(id))
        self.assertEqual(ErrorCode.EXCEED_MAX_ID_SIZE, e.exception.error_code)
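
The boundary here says a 128-character identifier is accepted and one more character triggers ErrorCode.EXCEED_MAX_ID_SIZE. A sketch of such a guard, reusing the project's LexerError and ErrorCode names from the test (not redefined here) and assuming a limit of 128:

MAX_ID_SIZE = 128  # assumed limit, inferred from the passing 128-character case

def check_id_length(identifier):
    """Reject identifiers longer than the assumed maximum."""
    if len(identifier) > MAX_ID_SIZE:
        # LexerError(error_code=...) is assumed to match e.exception.error_code above
        raise LexerError(error_code=ErrorCode.EXCEED_MAX_ID_SIZE)
    return identifier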
Example #8
    def test_all_tokens(self):

        tokens = [Token(t) for t in RESERVED_KEYWORDS.values()] + [
            Token(TokenType.ID, 'a'),
            Token(TokenType.ID, 'aaa'),
            Token(TokenType.ID, 'a123'),
            Token(TokenType.ID, 'a_'),
            Token(TokenType.ID, 'a_123'),
            Token(TokenType.ID, 'abc_def_123gh'),
            Token(TokenType.SEMI),
            Token(TokenType.COMMA),
            Token(TokenType.COLON),
            Token(TokenType.PLUS),
            Token(TokenType.MINUS),
            Token(TokenType.MUL),
            Token(TokenType.FLOAT_DIV),
            Token(TokenType.ASSIGN),
            Token(TokenType.LPAREN),
            Token(TokenType.RPAREN),
            Token(TokenType.LBRACK),
            Token(TokenType.RBRACK),
            Token(TokenType.LCURB),
            Token(TokenType.RCURB),
            Token(TokenType.LESS),
            Token(TokenType.GRE),
            Token(TokenType.LEQ),
            Token(TokenType.GEQ),
            Token(TokenType.EQ),
            Token(TokenType.NEQ),
            Token(TokenType.POW),
            Token(TokenType.SCALAR, 0),
            Token(TokenType.SCALAR, 12),
            Token(TokenType.SCALAR, 12.345),
            Token(TokenType.SCALAR, 12.345),
            Token(TokenType.SCALAR, float('12.345e6')),
            Token(TokenType.SCALAR, float('12.345e-6')),
            Token(TokenType.SCALAR, 0),
            Token(TokenType.SCALAR, 0.01),
            Token(TokenType.SCALAR, float('0.001e2')),
            Token(TokenType.SCALAR, float('0.0001e-2')),
            Token(TokenType.ETX)
        ]

        file_source = FileSource('tokens/all_tokens.txt')
        lexer = Lexer(file_source)
        for expected_token in tokens:
            token = lexer.current_token
            self.assertEqual(expected_token.type, token.type)
            self.assertEqual(expected_token.value, token.value)
            lexer.build_next_token()
Example #9
File: kale.py  Project: darioncassel/kale
def main(filename):
    print("----------Parser Debug-------------")
    tokenizer = Tokenizer(filename)
    tokenizer.tokenize()
    lexer = Lexer(tokenizer.getTokens())
    lexer.lex()
    parser = Parser(lexer.getTokens())
    parser.parse()
    print(parser.getTree())
    print("\n----------Execution Stack-----------")
    interpreter = Interpreter(parser.getTree())
    interpreter.interpret()
    print("\n----------Program Output------------")
    return interpreter.output()
Example #10
    def test_real_life_problems(self):
        file_source = FileSource('tokens/positions3.txt')
        lexer = Lexer(file_source)

        positions = [
            (2, 1), (2, 5), (2, 20), (2, 21), (2, 22),
            (3, 5), (3, 13), (3, 15), (3, 31), (3, 33), (3, 49), (3, 51), (3, 52),
            (4, 1)
        ]

        for expected_position in positions:
            token = lexer.current_token
            self.assertEqual(expected_position[0], token.position.line)
            self.assertEqual(expected_position[1], token.position.column)
            lexer.build_next_token()
Example #11
 def test_unknown_single_char_token(self):
     string = '^'
     string_source = StringSource(string)
     with self.assertRaises(LexerError) as e:
         Lexer(string_source)
     self.assertNotEqual(ErrorCode.UNEXPECTED_TOKEN, e.exception.error_code)
     self.assertEqual(ErrorCode.TOKEN_BUILD_FAIL, e.exception.error_code)
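
The point being asserted is the kind of failure: an unrecognized character such as '^' is a lexing failure (TOKEN_BUILD_FAIL), not a parser-level UNEXPECTED_TOKEN. A sketch of the fallback a token builder might end with, again borrowing the project's LexerError and ErrorCode names from the test (not redefined here):

def build_single_char_token(char, known_single_chars):
    """Map a recognised single character to its token name, or fail lexing."""
    if char in known_single_chars:
        return known_single_chars[char]
    # '^' and any other unmapped character ends up here
    raise LexerError(error_code=ErrorCode.TOKEN_BUILD_FAIL)  # assumed constructor signature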
Example #12
 def testLexComments(self):
     tokens = []
     tokens.append(Token(0, "#", None))
     tokens.append(Token(1, ")", None))
     tokens.append(Token(2, "+", None))
     tokens.append(Token(3, "-", None))
     tokens.append(Token(4, "*", None))
     tokens.append(Token(5, "/", None))
     tokens.append(Token(6, "#", None))
     self.lexer = Lexer(tokens)
     self.lexer.lex()
     tokens = []
     tokens.append(Token(0, "#", commentType()))
     tokens.append(Token(1, ")", commentType()))
     tokens.append(Token(2, "+", commentType()))
     tokens.append(Token(3, "-", commentType()))
     tokens.append(Token(4, "*", commentType()))
     tokens.append(Token(5, "/", commentType()))
     tokens.append(Token(6, "#", commentType()))
     for token1, token2 in zip(self.lexer.tokens, tokens):
         self.assertEqual(token1.id, token2.id,
                          "Tokenization incorrect")
         self.assertEqual(token1.literal, token2.literal,
                          "Tokenization incorrect")
         self.assertEqual(type(token1.type), type(token2.type),
                          "Tokenization incorrect")
Example #13
 def testGetTokensPostLex(self):
     tokens = []
     tokens.append(Token(0, "(", None))
     tokens.append(Token(1, ")", None))
     tokens.append(Token(2, "+", None))
     tokens.append(Token(3, "-", None))
     tokens.append(Token(4, "*", None))
     tokens.append(Token(5, "/", None))
     tokens.append(Token(6, "11", None))
     self.lexer = Lexer(tokens)
     self.lexer.lex()
     tokens = []
     tokens.append(Token(0, "(", statementBeginType()))
     tokens.append(Token(1, ")", statementEndType()))
     tokens.append(Token(2, "+", addOpType()))
     tokens.append(Token(3, "-", subOpType()))
     tokens.append(Token(4, "*", multOpType()))
     tokens.append(Token(5, "/", divOpType()))
     tokens.append(Token(6, 11, numType()))
     for token1, token2 in zip(self.lexer.getTokens(), tokens):
         self.assertEqual(token1.id, token2.id,
                          "Tokenization incorrect")
         self.assertEqual(token1.literal, token2.literal,
                          "Tokenization incorrect")
         self.assertEqual(type(token1.type), type(token2.type),
                          "Tokenization incorrect")
Example #14
    def test_etx_on_comment_line(self):
        string = 'not_comment = 1; # a comment'
        lexer = Lexer(StringSource(string))

        tokens = [
            Token(TokenType.ID, 'not_comment'),
            Token(TokenType.ASSIGN),
            Token(TokenType.SCALAR, 1),
            Token(TokenType.SEMI),
            Token(TokenType.ETX)
        ]

        for expected_token in tokens:
            token = lexer.current_token
            self.assertEqual(expected_token.type, token.type)
            self.assertEqual(expected_token.value, token.value)
            lexer.build_next_token()
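
What matters in this test is that a trailing # comment with no closing newline still ends cleanly in ETX. A self-contained sketch of comment skipping that treats end-of-text as a terminator (the helper is hypothetical; only the behaviour mirrors the test):

def skip_comment(text, i):
    """Given text[i] == '#', return the index just past the comment body."""
    while i < len(text) and text[i] != '\n':
        i += 1
    return i  # stops at the newline or at end of text, so a final comment is fine

# skip_comment('x = 1; # a comment', 7) -> 18 (end of text)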
Example #15
 def testInit(self):
     tokens = []
     tokens.append(Token(0, "a", None))
     tokens.append(Token(1, "line", None))
     tokens.append(Token(2, "the", None))
     tokens.append(Token(3, "four", None))
     self.lexer = Lexer(tokens)
     self.assertEqual(self.lexer.tokens, tokens,
                      "Initialization not correct")
Example #16
 def testInterpreterFib(self, mock_input, mock_output):
     """Verify the correctness of the fibonnaci.oats program (The 20th fibonnaci number is 6765)."""
     mock_input.side_effect = ["20"]
     lex = Lexer("Test/Examples/fibonnaci.oats")
     sparser = StatementParser(lex)
     root = sparser.parse_main()
     sym_tab = [dict()]
     sinterpreter = StatementInterpreter(sym_tab)
     sinterpreter.interpret_statement_list(root)
     self.assertEqual(mock_output.getvalue(), "6765\n")
Example #17
 def testInterpreterGCD(self, mock_input, mock_output):
     """Verify the correctness of the gcd.oats program (The gcd of 1071 and 462 is 21)."""
     mock_input.side_effect = ["1071", "462"]
     lex = Lexer("Test/Examples/gcd.oats")
     sparser = StatementParser(lex)
     root = sparser.parse_main()
     sym_tab = [dict()]
     sinterpreter = StatementInterpreter(sym_tab)
     sinterpreter.interpret_statement_list(root)
     self.assertEqual(mock_output.getvalue(), "21\n")
Example #18
    def test_positions_on_multiple_lines(self):
        file_source = FileSource('tokens/positions2.txt')
        lexer = Lexer(file_source)

        positions = [
            (2, 1), (2, 3), (2, 5), (2, 8),
            (3, 1), (3, 3), (3, 5), (3, 7), (3, 8), (3, 9),
            (4, 1),
            (5, 5), (5, 6),
            (6, 10), (6, 12),
            (7, 7), (7, 9),
            (8, 1), (8, 4), (8, 6), (8, 10), (8, 18), (8, 19), (8, 21)
        ]

        for expected_position in positions:
            token = lexer.current_token
            self.assertEqual(expected_position[0], token.position.line)
            self.assertEqual(expected_position[1], token.position.column)
            lexer.build_next_token()
Example #19
 def testGetTokensPreLex(self):
     tokens = []
     tokens.append(Token(0, "#", None))
     tokens.append(Token(1, ")", None))
     tokens.append(Token(2, "+", None))
     tokens.append(Token(3, "-", None))
     tokens.append(Token(4, "*", None))
     tokens.append(Token(5, "/", None))
     tokens.append(Token(6, "#", None))
     self.lexer = Lexer(tokens)
     for token1, token2 in zip(self.lexer.getTokens(), tokens):
         self.assertEqual(token1.id, token2.id,
                          "Tokenization incorrect")
         self.assertEqual(token1.literal, token2.literal,
                          "Tokenization incorrect")
         self.assertEqual(type(token1.type), type(token2.type),
                          "Tokenization incorrect")
Example #20
 def setUp(self):
     self.lex = Lexer("Test/Examples/gcd.oats")
     self.sparser = StatementParser(self.lex)
     self.expected_nodes = [
         Node(Node.STATEMENT_LIST_TYPE),
         Node(Node.FUNC_DEF_TYPE, "gcd"),
         Node(Node.PARAMETER_LIST_TYPE),
         Node(Node.VARIABLE_TYPE, "x"),
         Node(Node.VARIABLE_TYPE, "y"),
         Node(Node.STATEMENT_LIST_TYPE),
         Node(Node.IF_TYPE),
         Node(Node.LT_TYPE),
         Node(Node.VARIABLE_TYPE, "x"),
         Node(Node.VARIABLE_TYPE, "y"),
         Node(Node.STATEMENT_LIST_TYPE),
         Node(Node.ASSIGN_TYPE),
     ]
Example #21
def empezar():
    import sys
    archivoEntrada = open(sys.argv[1], 'r')
    analizadorLexico = Lexer()
    entrada = archivoEntrada.read()
    analizadorLexico.input(entrada)
    lineatokens = ''
    try:
        for token in iter(analizadorLexico.lexer.token, None):
            lineatokens += repr(token.type) + ' ' + repr(
                token.value) + ' ' + repr(token.lineno)
            lineatokens += '\n'
    except Error.LexicalError.LexicalError as error:
        lineatokens = error.mensaje
    archivoEntrada.close()
    analizadorSintactico = Parser(analizadorLexico.tokens)
    analizadorLexico.lexer.lineno = 1
    try:
        raiz = analizadorSintactico.parse(entrada)
        Parser.linea += 'Programa:\n'
        for clase in raiz:
            clase.imprimir()
    except Error.SyntacticalError.SyntacticalError as error:
        Parser.linea = error.mensaje
    analizadorSemantico = VisitanteTabla()
    try:
        analizadorSemantico.visitarProgram(raiz)
    except Error.SemanticError.SemanticError as error:
        try:
            analizadorSemantico.linea = error.mensaje
            analizadorSemantico.linea += '\nEl error se dio en la clase: ' + analizadorSemantico.metodoActual.tabla.padre.nombre + ', metodo: ' + analizadorSemantico.metodoActual.tabla.nombre
        except BaseException:
            pass
    analizadorCodigo = VisitanteLir(analizadorSemantico.tablaPrincipal)
    analizadorCodigo.visitarProgram(raiz)
    dump_tokens = 0
    dump_ast = 0
    dump_symtab = 0
    dump_lir = 0
    for parametro in sys.argv:
        if parametro[0] == '-':
            if parametro == '-dump-tokens':
                dump_tokens = 1
            if parametro == '-dump-ast':
                dump_ast = 1
            if parametro == '-dump-symtab':
                dump_symtab = 1
            if parametro == '-dump-lir':
                dump_lir = 1
    if (dump_tokens):
        archivoSalida = open(sys.argv[1][:-3] + '.tok', 'w')
        archivoSalida.write(lineatokens)
        print "Tokens fueron escritos a: %s" % (str(sys.argv[1][:-3] + '.tok'))
        archivoSalida.close()
    if (dump_ast):
        archivoSalida = open(sys.argv[1][:-3] + '.ast', 'w')
        archivoSalida.write(Parser.linea)
        print "Salida del AST fue escrita a: %s" % (str(sys.argv[1][:-3] +
                                                        '.ast'))
        archivoSalida.close()
    if (dump_symtab):
        archivoSalida = open(sys.argv[1][:-3] + '.sym', 'w')
        archivoSalida.write(analizadorSemantico.linea)
        print "Salida de la tabla de simbolos fue escrita a: %s" % (
            str(sys.argv[1][:-3] + '.sym'))
        archivoSalida.close()
    if (dump_lir):
        archivoSalida = open(sys.argv[1][:-3] + '.lir', 'w')
        archivoSalida.write(analizadorCodigo.linea)
        print "Codigo intermedio escrito a: %s" % (str(sys.argv[1][:-3] +
                                                       '.lir'))
        archivoSalida.close()
    listabat = analizadorCodigo.linea.split('\n')
    lineabat = '@ECHO OFF\n'
    if listabat[0] == '':
        listabat = listabat[1:]
    lineabat += 'echo ' + listabat[0] + '>codigo.tmp\n'
    for linea in listabat[1:]:
        if linea == '':
            lineabat += 'echo+>>codigo.tmp\n'
        else:
            lineabat += 'echo ' + linea + '>>codigo.tmp\n'

    lineabat += 'java -jar microLIR.jar codigo.tmp\necho+\npause\ndel codigo.tmp'
    archivoSalida = open(sys.argv[1][:-3] + '.bat', 'w')
    archivoSalida.write(lineabat)
    print 'Archivo batch ejecutable escrito a: ' + sys.argv[1][:-3] + '.bat'
    archivoSalida.close()
Example #22
 def test_etx_in_unfinished_string(self):
     string = '"some random string'
     with self.assertRaises(LexerError):
         lexer = Lexer(StringSource(string))
         while lexer.current_token.type != TokenType.ETX:
             lexer.build_next_token()
Example #23
from Lexer.Lexer import Lexer
from Parser.StatementParser import StatementParser
from Interpreter.StatementInterpreter import StatementInterpreter
from Common.Common import *
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("file",
                    nargs=1,
                    help="Filename of the .oats file to be interpreted")
parser.add_argument("-d", "--debug", action='store_true')
args = parser.parse_args()
lexer = Lexer(args.file[0])
parser = StatementParser(lexer)
root = parser.parse_main()
if args.debug:
    print("-" * 50)
    print("PARSE TREE")
    print("-" * 50)
    root.print_recursive()
    print("-" * 50)
    print("PROGRAM")
    print("-" * 50)
sym_tab = [dict()]
interpreter = StatementInterpreter(sym_tab)
interpreter.interpret_statement_list(root)
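
Given the argparse setup above, a typical invocation would look something like the following (the entry-point name main.py is a placeholder; the example file path comes from the tests earlier on this page):

python main.py Test/Examples/gcd.oats --debug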
Example #25
import sys
from Lexer.Lexer import Lexer
from Parser.Parser import Parser
from Interpreter.Interpreter import Interpreter
from Lexer.LexerHash import LexerHash
from Lexer.LexerQueue import LexerQueue

Lexer.run(sys.argv)

Parser.run()
Interpreter.run()

for arg in sys.argv:
    if arg == "-v":
        print("Tabela de variáveis")
        LexerHash.shared().verbose()
        print("\nLista de instruções")
        LexerQueue.shared().verbose()
Example #26
 def setUp(self):
     self.lexer = Lexer(None)
Example #27
class testLexer(unittest.TestCase):

    def setUp(self):
        self.lexer = Lexer(None)

    def testInit(self):
        tokens = []
        tokens.append(Token(0, "a", None))
        tokens.append(Token(1, "line", None))
        tokens.append(Token(2, "the", None))
        tokens.append(Token(3, "four", None))
        self.lexer = Lexer(tokens)
        self.assertEqual(self.lexer.tokens, tokens,
                         "Initialization not correct")
    
    def testLexNoComments(self):
        tokens = []
        tokens.append(Token(0, "(", None))
        tokens.append(Token(1, ")", None))
        tokens.append(Token(2, "+", None))
        tokens.append(Token(3, "-", None))
        tokens.append(Token(4, "*", None))
        tokens.append(Token(5, "/", None))
        tokens.append(Token(6, "11", None))
        self.lexer = Lexer(tokens)
        self.lexer.lex()
        tokens = []
        tokens.append(Token(0, "(", statementBeginType()))
        tokens.append(Token(1, ")", statementEndType()))
        tokens.append(Token(2, "+", addOpType()))
        tokens.append(Token(3, "-", subOpType()))
        tokens.append(Token(4, "*", multOpType()))
        tokens.append(Token(5, "/", divOpType()))
        tokens.append(Token(6, 11, numType()))
        for token1, token2 in zip(self.lexer.tokens, tokens):
            self.assertEqual(token1.id, token2.id,
                             "Tokenization incorrect")
            self.assertEqual(token1.literal, token2.literal,
                             "Tokenization incorrect")
            self.assertEqual(type(token1.type), type(token2.type),
                             "Tokenization incorrect")

    def testLexComments(self):
        tokens = []
        tokens.append(Token(0, "#", None))
        tokens.append(Token(1, ")", None))
        tokens.append(Token(2, "+", None))
        tokens.append(Token(3, "-", None))
        tokens.append(Token(4, "*", None))
        tokens.append(Token(5, "/", None))
        tokens.append(Token(6, "#", None))
        self.lexer = Lexer(tokens)
        self.lexer.lex()
        tokens = []
        tokens.append(Token(0, "#", commentType()))
        tokens.append(Token(1, ")", commentType()))
        tokens.append(Token(2, "+", commentType()))
        tokens.append(Token(3, "-", commentType()))
        tokens.append(Token(4, "*", commentType()))
        tokens.append(Token(5, "/", commentType()))
        tokens.append(Token(6, "#", commentType()))
        for token1, token2 in zip(self.lexer.tokens, tokens):
            self.assertEqual(token1.id, token2.id,
                             "Tokenization incorrect")
            self.assertEqual(token1.literal, token2.literal,
                             "Tokenization incorrect")
            self.assertEqual(type(token1.type), type(token2.type),
                             "Tokenization incorrect")
    
    def testGetTokensPreLex(self):
        tokens = []
        tokens.append(Token(0, "#", None))
        tokens.append(Token(1, ")", None))
        tokens.append(Token(2, "+", None))
        tokens.append(Token(3, "-", None))
        tokens.append(Token(4, "*", None))
        tokens.append(Token(5, "/", None))
        tokens.append(Token(6, "#", None))
        self.lexer = Lexer(tokens)
        for token1, token2 in zip(self.lexer.getTokens(), tokens):
            self.assertEqual(token1.id, token2.id,
                             "Tokenization incorrect")
            self.assertEqual(token1.literal, token2.literal,
                             "Tokenization incorrect")
            self.assertEqual(type(token1.type), type(token2.type),
                             "Tokenization incorrect")
    
    def testGetTokensPostLex(self):
        tokens = []
        tokens.append(Token(0, "(", None))
        tokens.append(Token(1, ")", None))
        tokens.append(Token(2, "+", None))
        tokens.append(Token(3, "-", None))
        tokens.append(Token(4, "*", None))
        tokens.append(Token(5, "/", None))
        tokens.append(Token(6, "11", None))
        self.lexer = Lexer(tokens)
        self.lexer.lex()
        tokens = []
        tokens.append(Token(0, "(", statementBeginType()))
        tokens.append(Token(1, ")", statementEndType()))
        tokens.append(Token(2, "+", addOpType()))
        tokens.append(Token(3, "-", subOpType()))
        tokens.append(Token(4, "*", multOpType()))
        tokens.append(Token(5, "/", divOpType()))
        tokens.append(Token(6, 11, numType()))
        for token1, token2 in zip(self.lexer.getTokens(), tokens):
            self.assertEqual(token1.id, token2.id,
                             "Tokenization incorrect")
            self.assertEqual(token1.literal, token2.literal,
                             "Tokenization incorrect")
            self.assertEqual(type(token1.type), type(token2.type),
                             "Tokenization incorrect")
Example #28
 def __init__(self, source):
     self.lexer = Lexer(source)
     self.source = source
     self.start_of_object_pos = 0