Example #1
 def test_handles_string_literal(self):
     string1 = 'this is a string !@#$^woooo 1234'
     lut = Lexer('"{string1}"'.format(string1=string1))
     result = lut.get_next_token()
     self.assertEqual(result.type, TOKEN_STRING_LITERAL)
     self.assertEqual(result.value, string1)
Example #2
 def test_handles_reserved_word(self):
     lut = Lexer('BEGIN')
     result = lut.get_next_token()
     self.assertEqual(result.type, TOKEN_BEGINFUNCTION)
Example #3
 def test_collects_errors(self):
     lut = Lexer('&')
     lut.get_next_token()
     self.assertEqual(len(lut.errors()), 1)
Example #4
 def test_handles_valid_double_character_symbol(self):
     lut = Lexer('==')
     result = lut.get_next_token()
     self.assertEqual(result.type, TOKEN_EQ)
Example #5
 def test_handles_empty_string(self):
     lut = Lexer('')
     result = lut.get_next_token()
     self.assertEqual(result.type, TOKEN_EOF)
Example #6
 def test_decimal_constant(self):
     self.assertEqual(Lexer("4.2").check_tokens(), "<CONSTANT=4.2>\n")
Example #7
 def run_interpreter(stream):
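     # Lex the raw character stream, parse the token stream, then execute the parsed program.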
     token_stream = Lexer.start(stream)
     parsed_stream = Parser.start(token_stream)
     return Interpreter.simulate_statements(parsed_stream)
Example #8
def eat_tokens(line):
    lex = Lexer(line)
    out = ""
    while lex.get_next_token():
        out += lex.peek_token().test()
    return out
Example #9
 def test_move_pointer_stream(self):
     tokenstream_oracle = TokenStream([PointerRight(0), PointerLeft(15)])
     actual_tokenstream = Lexer.start('H4ck3rs Delight. Del1ghtH4ck3rs.')
     self.assertEqual(actual_tokenstream, tokenstream_oracle)
Example #10
import os
import sys

import lexer.tag as Tag
from lexer.lexer import Lexer, LexError
import utils.utils as utils

testdir = utils.dirpath('lexer_test', root=utils.testdir)
filelist = [file for file in os.listdir(testdir) if file.endswith('.em')]

# Pass a "cmp" command line argument to make the script generate the compare files
suffix = '.cmp' if len(sys.argv) > 1 and sys.argv[1] == 'cmp' else '.out'

for file in filelist:

    infile = testdir + os.path.sep + file
    outfile = infile[0:-3] + suffix
    ins = open(infile)
    lex = Lexer(ins.read())
    ins.close()

    outs = open(outfile, 'w')

    #print 'Test:', file

    outstr_1 = ''
    outstr_2 = ''

    # set last token to EOL so we can skip the first EOL if it is
    # from the end of a comment
    lastTokenTag = '\n'

    while True:
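        # NOTE: the original script is truncated at this loop; everything below
        # is an assumed sketch of the continuation. Lexer.scan() and Tag.EOF are
        # guesses at this project's API, not confirmed by the source.
        try:
            token = lex.scan()
        except LexError as err:
            outstr_2 += str(err) + '\n'
            continue
        if token is None or token.tag == Tag.EOF:
            break
        # Skip an EOL that directly follows another EOL (e.g. the one emitted
        # at the end of a comment), per the lastTokenTag note above.
        if token.tag == '\n' and lastTokenTag == '\n':
            continue
        outstr_1 += str(token) + '\n'
        lastTokenTag = token.tag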
Example #11
 def test_separate_integers_as_one_stream(self):
     tokenstream_oracle = TokenStream([Integer(0, 152546)])
     actual_tokenstream = Lexer.start('152          546')
     self.assertEqual(actual_tokenstream, tokenstream_oracle)
Example #12
 def test_integer_stream(self):
     tokenstream_oracle = TokenStream([Integer(0, 152), InputCharCell(3), Integer(6, 546)])
     actual_tokenstream = Lexer.start('152 DH@   546')
     self.assertEqual(actual_tokenstream, tokenstream_oracle)
Example #13
 def test_io_char_stream(self):
     tokenstream_oracle = TokenStream([OutputCharCell(0), InputCharCell(3), OutputCharCell(18)])
     actual_tokenstream = Lexer.start('HD@ Del1ghtH4ckers@ HD@')
     self.assertEqual(actual_tokenstream, tokenstream_oracle)
Example #14
 def test_basic_constant(self):
     self.assertEqual(Lexer("42").check_tokens(), "<CONSTANT=42>\n")
Example #15
 def test_basic_char(self):
     self.assertEqual(
         Lexer("'*'").get_next_token().test(), "<CHAR_CONST='*'>")
Example #16
 def test_minus_sign_constant(self):
     self.assertEqual(Lexer("-42").check_tokens(), "<MINUS><CONSTANT=42>\n")
Example #17
 def test_escaped_newline(self):
     self.assertEqual(
         Lexer("'\\n'").get_next_token().test(), "<CHAR_CONST='\\n'>")
Example #18
 def test_io_num_stream(self):
     tokenstream_oracle = TokenStream([OutputNumCell(0), InputNumCell(15)])
     actual_tokenstream = Lexer.start('H4ckers Del1ght= DH=')
     self.assertEqual(actual_tokenstream, tokenstream_oracle)
Example #19
 def test_octal_char(self):
     self.assertEqual(
         Lexer("'\\042'").get_next_token().test(), "<CHAR_CONST='\\042'>")
Example #20
 def test_handles_single_character_symbol(self):
     lut = Lexer('+')
     result = lut.get_next_token()
     self.assertEqual(result.type, TOKEN_ADD)
Example #21
 def test_hex_char(self):
     self.assertEqual(
         Lexer("'\\x042'").get_next_token().test(), "<CHAR_CONST='\\x042'>")
Example #22
 def test_handles_invalid_double_character_symbol(self):
     lut = Lexer('[]')
     self.assertEqual(lut.get_next_token().type, TOKEN_LBRACKET)
     self.assertEqual(lut.get_next_token().type, TOKEN_RBRACKET)
Example #23
 def test_error_newline_in_const(self):
     # The lexer's specific error type isn't shown in the source; Exception is a stand-in.
     self.assertRaises(Exception, Lexer("'\n1'").get_next_token)
Example #24
 def test_handles_identifier_abc_123(self):
     lut = Lexer('abc_123')
     result = lut.get_next_token()
     self.assertEqual(result.type, TOKEN_IDENTIFIER)
     self.assertEqual(result.value, 'abc_123')
Example #25
 def test_error_escaped_newline_followed_by_newline(self):
     self.assertRaises(Exception, Lexer("'\\n\n'").get_next_token)
Example #26
 def test_handles_whitespace(self):
     lut = Lexer('        ')
     result = lut.get_next_token()
     self.assertEqual(result.type, TOKEN_EOF)
Example #27
 def test_error_unclosed_quote(self):
     self.assertRaises(Exception, Lexer("'A").get_next_token)
Example #28
 def test_handles_integer_literal(self):
     lut = Lexer('12345')
     result = lut.get_next_token()
     self.assertEqual(result.type, TOKEN_INTEGER_LITERAL)
     self.assertEqual(result.value, 12345)
Example #29
 def test_basic_escaped_string(self):
     self.assertEqual(
         Lexer('"Basic \\"string\\""').get_next_token().test(),
         '<STRING="Basic \\"string\\"">',
     )
Example #30
 def test_handles_inline_comment(self):
     comment = 'comment'
     lut = Lexer('//{comment}'.format(comment=comment))
     result = lut.get_next_token()
     self.assertEqual(result.type, TOKEN_COMMENT)
     self.assertEqual(result.value, comment)
Example #31
 def test_escaped_string(self):
     self.assertEqual(
         Lexer('"Escaped \\\\\\"string\\\\\\\\\\"\\\\"').get_next_token().test(),
         '<STRING="Escaped \\\\\\"string\\\\\\\\\\"\\\\">',
     )
Example #32
 def test_plus_sign_constant(self):
     self.assertEqual(Lexer("+42").check_tokens(), "<PLUS><CONSTANT=42>\n")
Example #33
 def test_basic_string(self):
     self.assertEqual(Lexer('"Basic string"').get_next_token().test(), '<STRING="Basic string">')
Example #34
 def test_many_signs_constant(self):
     self.assertEqual(
         Lexer("+-42").check_tokens(), "<PLUS><MINUS><CONSTANT=42>\n")
Example #35
 def test_decimal_constant_starting_with_dot(self):
     self.assertEqual(Lexer(".42").check_tokens(), "<CONSTANT=.42>\n")
Example #36
 def test_edit_cell_stream(self):
     tokenstream_oracle = TokenStream([IncrementCell(0), IncrementCell(15),
                                       IncrementCell(18), DecrementCell(21)])
     actual_tokenstream = Lexer.start('H4ck3rs Delight! HD! HD! Del1ghtH4ck3rs!')
     self.assertEqual(actual_tokenstream, tokenstream_oracle)