Example #1
File: mkw.py Project: dnsmkl/pysql
def __test__eat_mkw_try_specified():
    assert(
        __eat_mkw_try_specified(lexer.tokenize('LEFT JOIN'), ['LEFT', 'JOIN'])
        == 'LEFT JOIN'
    )
    assert(
        __eat_mkw_try_specified(lexer.tokenize('LEFT JOIN'), ['NO', 'MATCH'])
        == None
    )
Example #2
File: tests.py Project: wjzz/polyglot
    def test_lexer(self):
        e1 = "1"
        self.assertEqual(list(simplify(tokenize(e1))),
            [('NUMBER', 1), 'EOF'])

        input_str = "(1 + 11 * 22)"

        result = list(simplify(tokenize(input_str)))

        expected = ['LPAREN', ('NUMBER', 1), 'PLUS', ('NUMBER', 11),
            'TIMES', ('NUMBER', 22), 'RPAREN', 'EOF']

        self.assertEqual(expected, result)
Example #3
    def test_tokenize(self):
        """
        It tests the calclex.tokenize function in the calclex module.
        """
        testcases = {
                ".32.43 1.23 3.":
                ('LexToken(NUMBER,0.32,1,0), LexToken(NUMBER,0.43,1,3)'
                 ', LexToken(NUMBER,1.23,1,7), LexToken(NUMBER,3.0,1,12)'),
                "x**=abc+sum(max(a,b))":
                ("LexToken(ID,'x',1,0), LexToken(POWER_ASSIGN,'**=',1,1)"
                 ", LexToken(ID,'abc',1,4), LexToken(PLUS,'+',1,7)"
                 ", LexToken(ID,'sum',1,8), LexToken(LPAREN,'(',1,11)"
                 ", LexToken(ID,'max',1,12), LexToken(LPAREN,'(',1,15)"
                 ", LexToken(ID,'a',1,16), LexToken(COMMA,',',1,17)"
                 ", LexToken(ID,'b',1,18), LexToken(RPAREN,')',1,19)"
                 ", LexToken(RPAREN,')',1,20)")}

        # each token has (type, value, lineno, lexpos)
        for key, val in testcases.items():
            x = []
            for tok in lex.tokenize(key):
                x.append(str(tok))

            output = ", ".join(x)

            self.assertTrue(output == val)
Example #4
def test_operators():
    cases = [
        ('1 + 1', [BinOp(Num(1), '+', Num(1))]),
    ]
    for string, ast in cases:
        tokens = lexer.tokenize(string)
        assert bcparser.parse(tokens) == ast
Example #5
    def solve(self, expr: str = ""):
        """solve cryptarithm problem"""
        print("Problem: {}".format(expr))

        p = Parser(tokenize(expr))
        pr = Problem(p.parse())
        print(pr.search_all_solution())
Example #6
    def __init__(self, inptline, maxhistory=2):

        self.tokgen = lex.tokenize(inptline)

        self.maxhistory = maxhistory
        self.tokqueue = deque(maxlen=self.maxhistory)
        self.tokindex = 0
        self.tokstreamended = False
        self.currtok = None  # the current token returned

        # read all the tokens until the queue is full
        # or the tokens are finished, whichever is first.
        count = 0
        while count < self.maxhistory:
            if self.tokstreamended:
                self.tokqueue.append(None)
                count += 1
                continue

            try:
                tok = next(self.tokgen)
                self.tokqueue.append(tok)

            except:
                # signifies that the self.tokgen has run out of tokens
                # None signifies that the token stream ends.
                self.tokqueue.append(None)
                self.tokstreamended = True
            count += 1
        logger.info(self.tokqueue)

        self.tokstreamended = False # for nextok() to use it fresh
Example #7
def main():
    ast.g_llvm_pass_manager.add(ast.g_llvm_executor.target_data)
    ast.g_llvm_pass_manager.add(PASS_INSTRUCTION_COMBINING)
    ast.g_llvm_pass_manager.add(PASS_REASSOCIATE)
    ast.g_llvm_pass_manager.add(PASS_GVN)
    ast.g_llvm_pass_manager.add(PASS_CFG_SIMPLIFICATION)
    ast.g_llvm_pass_manager.initialize()

    operator_precedence = {
        '<': 10,
        '+': 20,
        '-': 20,
        '/': 40,
        '*': 40
    }

    while True:
        print('ready>')
        try:
            raw = raw_input()
        except KeyboardInterrupt:
            break

        parser = parse.Parser(lexer.tokenize(raw), operator_precedence)
        while True:
            if isinstance(parser.current, lexer.EOFToken):
                break
            if isinstance(parser.current, lexer.DefToken):
                parser.handle_definition()
            elif isinstance(parser.current, lexer.ExternToken):
                parser.handle_extern()
            else:
                parser.handle_top_level_expression()

    print('\n', ast.g_llvm_module)
Example #8
File: lang.py Project: leovt/kant
def compile(source, filename):  # @ReservedAssignment
    tokens = lexer.tokenize(source)
    parser.init(tokens)
    tree = parser.program()
    def error(message, token, level='Error'):
        if token is None:
            # should be avoided for good error messages
            sys.stderr.write('%s %s\n' % (level, message))
        else:
            lines = source.splitlines()
            sys.stderr.write('%s\n%s^\n' % (lines[token.line_no-1], ' '*(token.col_no-1)))
            sys.stderr.write('%s:%d:%d: %s %s\n' % (filename, token.line_no, token.col_no, level, message))
    scopes.error = error
    
    print 'S-expr:', ast.stree(tree)
    print
    ast.ptree(tree)
    print
    code, globls = scopes.build(tree)
    for name, symbol in globls.names.items():
        print name, symbol
    print
    for line in code:
        print '\t'.join(map(str, line))
    return code, globls
Example #9
def parse_source(source):
    from lexer import tokenize
    toks = tokenize(source)

    p = Parser(toks)

    return p.parse_program()
Example #10
 def test_double_equals(self):
     """Test tokenizing double equals."""
     self.assertEqual(lexer.tokenize("a == 10", ""), [
         Token(token_kinds.identifier, "a"),
         Token(token_kinds.twoequals),
         Token(token_kinds.number, "10")
     ])
Example #11
 def test_symbol_splits_keywords(self):
     """Test that the lexer splits on symbols."""
     self.assertEqual(lexer.tokenize("ident1+ident2", ""), [
         Token(token_kinds.identifier, "ident1"),
         Token(token_kinds.plus),
         Token(token_kinds.identifier, "ident2")
     ])
Example #12
    def test_lexer_comment(self):
        input_str = "// this is a comment\n+"
        result = list(simplify(tokenize(input_str)))

        expected = ['PLUS', 'EOF']

        self.assertEqual(expected, result)
Example #13
 def parse(self, s):
     self.lexer = lexer.tokenize(s)
     self.Next()
     r = self.ParseUntil(0)
     if not self.AtToken('eof'):
         raise ParseError('There are unparsed tokens: %r' % self.token)
     return r
Example #14
def compile_bytecode(name):
    with open(name, 'r') as f:
        text = f.read()
    tokens = tokenize(text)
    ast = parse(tokens)
    fns, stack = codegen(ast)
    return fns, stack
Example #15
File: ast.py Project: Aayyush/Sile
 def parse_type(line_idx, line, type_str):
     try:
         type_tokens = lexer.tokenize(type_str)
     except frontend.SileLexError, e:
         raise DeserializeException(
             "line {} invalid type name: {}: {}".format(
                 line_idx, line, e))
Example #16
def test_lexer():
    cases = [
        ("a", [Ident("a")]),
        ("b", [Ident("b")]),
        ("1", [Num(1)]),
    ]
    for inp, outp in cases:
        assert list(lexer.tokenize(inp)) == outp
Example #17
def parse():
    print('POEHALI')
    lex = lexer.tokenize()
    parser = yacc.yacc()
    ast = parser.parse(lexer=lex, debug=1)
    file = open('out.txt', 'w')
    file.write(json.dumps(ast, indent=4))
    print(json.dumps(ast, indent=4))
Example #18
    def test_lexer_return(self):
        input_str = "return x;"

        result = list(simplify(tokenize(input_str)))

        expected = ['RETURN', ('ID', 'x'), 'SEMI', 'EOF']

        self.assertEqual(expected, result)
Example #19
    def test_lexer_print(self):
        input_str = "print(x);"

        result = list(simplify(tokenize(input_str)))

        expected = ['PRINT', 'LPAREN', ('ID', 'x'), 'RPAREN', 'SEMI', 'EOF']

        self.assertEqual(expected, result)
Example #20
    def test_lexer_continue(self):
        input_str = "continue;"

        result = list(simplify(tokenize(input_str)))

        expected = ['CONTINUE', 'SEMI', 'EOF']

        self.assertEqual(expected, result)
Example #21
 def test_lexer_address(self):
     inputs = [
         ("d = &n", [('ID', 'd'), 'EQUAL', 'AMPERSAND', ('ID', 'n'),
                     'EOF']),
     ]
     for (input, expected) in inputs:
         with self.subTest(input=input):
             self.assertEqual(list(simplify(tokenize(input))), expected)
Example #22
    def test_lexer_break(self):
        input_str = "break;"

        result = list(simplify(tokenize(input_str)))

        expected = ['BREAK', 'SEMI', 'EOF']

        self.assertEqual(expected, result)
Example #23
    def test_lexer_binops(self):
        input_str = "x && y || z"

        result = list(simplify(tokenize(input_str)))

        expected = [('ID', 'x'), 'AND', ('ID', 'y'), 'OR', ('ID', 'z'), 'EOF']

        self.assertEqual(expected, result)
Example #24
 def test_lexer_decl_pointers(self):
     inputs = [
         ("int* d", [('TYPE', 'int'), 'TIMES', ('ID', 'd'), 'EOF']),
         ("int *d", [('TYPE', 'int'), 'TIMES', ('ID', 'd'), 'EOF']),
         ("long* d", [('TYPE', 'long'), 'TIMES', ('ID', 'd'), 'EOF']),
     ]
     for (input, expected) in inputs:
         with self.subTest(input=input):
             self.assertEqual(list(simplify(tokenize(input))), expected)
Example #25
    def test_lexer_with_let(self):
        input_str = "let x := 1 in x + x end"

        result = list(simplify(tokenize(input_str)))

        expected = ['LET', ('ID', "x"), "ASSIGN", ('NUMBER', 1), "IN", 
            ('ID', "x"), 'PLUS', ('ID', "x"), 'END', 'EOF']
        
        self.assertEqual(expected, result)
Example #26
def main():
    # Input verification.
    if len(argv) < 2:
        print("Error! Filename not specified!")
        exit(1)
    else:
        if argv[1] == '-help':
            print_instructions()
            exit(0)

        filename = argv[1]
        parse_flags(argv[2:])

        try:
            with open(filename, 'r') as file:
                lines = file.read().splitlines()
                tokens = tokenize(lines)
                try:
                    address = 0
                    if not flag_values['s']:
                        print(".text")  # section

                    for token in tokens:
                        # Offset used to align operands.
                        operand_offset = INSTRUCTION_MAX_LEN - len(token[0])
                        operand_offset = ' ' * operand_offset

                        try:
                            # Instruction as a hexadecimal number.
                            encoded = bin_to_hex(parse(token), 8)
                        except Exception as exc:
                            if not flag_values['e']:
                                raise exc
                            print(exc)
                            encoded = '[invalid]'

                        # -l flag enables little-endian
                        if flag_values['l']:
                            encoded = hex_to_little_endian(encoded)

                        if not flag_values['s']:
                            instruction_info = token[0] + " " \
                                               + operand_offset \
                                               + ','.join(token[1:])
                            print(
                                dec_int_to_hex(address, 8) + "    " + encoded +
                                "    " + instruction_info)
                        else:
                            print(encoded)

                        address += 4

                except Exception as exc:
                    print(exc)
        except Exception as exc:
            print("Error! Failed to open " + filename + "!")
            exit(1)
Example #27
 def test_lexer_ident(self):
     inputs = [
         ("x", [('ID', 'x'), 'EOF']),
         ("x123", [('ID', 'x123'), 'EOF']),
         ("x_123", [('ID', 'x_123'), 'EOF']),
     ]
     for (input, expected) in inputs:
         with self.subTest(input=input):
             self.assertEqual(list(simplify(tokenize(input))), expected)
Example #28
def __open_file(program):
    try:
        f = open(program.strip(), 'r')
    except:
        raise ValueError(
            'Brainfuck: program {name} does not exist.'.format(name=program))
    L = [line for line in f]  # parse file
    T = tokenize(L)
    return T
Example #29
def __main():
    input_str = "(λx.x x)(λy.y)"
    tokens = tokenize(input_str)
    ast = parse(tokens)

    from pprint import pprint
    print(input_str)
    pprint(tokens)
    pprint(ast)
    print(ast)
Example #30
    def test_lexer_fun_decl(self):
        input_str = "void foo(long a, int b, char c) { }"

        result = list(simplify(tokenize(input_str)))

        expected = [('TYPE', 'void'), ('ID', 'foo'),
                    'LPAREN', ('TYPE', 'long'), ('ID', 'a'), 'COMMA',
                    ('TYPE', 'int'), ('ID', 'b'), 'COMMA', ('TYPE', 'char'),
                    ('ID', 'c'), 'RPAREN', 'LBRACE', 'RBRACE', 'EOF']

        self.assertEqual(expected, result)
Example #31
 def test_comment(self):
     expect = [
         Token("NEWLINE", "\n", 1, 9),
         Token("STRING", "not a comment", 2, 0),
     ]
     tokens = tokenize("//comment\n\"not a comment\"")
     for (token, ex) in itertools.izip(tokens, expect):
         self.assertEqual(token.typ, ex.typ)
         self.assertEqual(token.value, ex.value)
         self.assertEqual(token.line, ex.line)
         self.assertEqual(token.column, ex.column)
Example #32
File: tests.py Project: wjzz/polyglot
    def test_lexer_while(self):
        input_str = "while (x > 0) { x = 10; } "

        result = list(simplify(tokenize(input_str)))

        expected = ['WHILE', 'LPAREN', ('ID', 'x'),
                   'GREATER', ('NUMBER', 0), 'RPAREN',
                   'LBRACE', ('ID', 'x'), 'EQUAL', ('NUMBER', 10),
                   'SEMI', 'RBRACE', 'EOF']

        self.assertEqual(expected, result)
Example #33
def interpret(filename):
    tokens = tuple(lexer.tokenize(filename))
    defs, state = Parser(*tokens).parse()
    gctx, errors = context_check(defs)

    if errors:
        for node, msg in errors:
            print msg, "on line", node.line, "column", node.column

    else:
        return gctx["MAIN"].evaluate({}, gctx)
Example #34
def __open_file(name):
    # name is string without '.bf' suffix
    file_path = name.strip() + '.bf'
    try:
        f = open(file_path, 'r')
    except:
        raise ValueError('Brainfuck: macro definition does not exist.')
    L = [line for line in f]  # parse file
    T = tokenize(L)
    f.close()
    return T
Example #35
 def test_strings(self):
     expect = [
         Token("STRING", "I'm sorry, Dave",       1, 0),
         Token("STRING", "Open the pod bay door", 1, 18),
     ]
     tokens = tokenize("\"I'm sorry, Dave\" \"Open the pod bay door\"")
     for (token, ex) in itertools.izip(tokens, expect):
         self.assertEqual(token.typ, ex.typ)
         self.assertEqual(token.value, ex.value)
         self.assertEqual(token.line, ex.line)
         self.assertEqual(token.column, ex.column)
Example #36
def interpret(script, dictionary):
    print(script)
    tokens = lexer.tokenize(script)

    tokens = list(tokens)

    program = parser.parse_program(tokens, dictionary)

    program, dictionary = evaluator.evaluate_program(program, dictionary)

    return program, dictionary
Example #37
def go(string):
	while True:
		tokens = lexer.tokenize(string)
		for t in tokens:
			print t, t.__class__

		# entering loop
		print '>',		# hold cursor in current line
		try:
			string = raw_input()
		except KeyboardInterrupt:	# exit with ctrl+c
			return
Example #38
 def test_paren(self):
     expect = [
         Token("LPAREN", "(", 1, 0),
         Token("ID", "a", 1, 1),
         Token("RPAREN", ")", 1, 2),
     ]
     tokens = tokenize("(a)")
     for (token, ex) in itertools.izip(tokens, expect):
         self.assertEqual(token.typ, ex.typ)
         self.assertEqual(token.value, ex.value)
         self.assertEqual(token.line, ex.line)
         self.assertEqual(token.column, ex.column)
Example #39
    def test_lexer_compound_assign(self):
        inputs = [
            ("x += 1", [('ID', 'x'), 'PLUS_EQ', ('NUMBER', 1), 'EOF']),
            ("x -= 1", [('ID', 'x'), 'MINUS_EQ', ('NUMBER', 1), 'EOF']),
            ("x *= 1", [('ID', 'x'), 'TIMES_EQ', ('NUMBER', 1), 'EOF']),
            ("x /= 1", [('ID', 'x'), 'DIVIDE_EQ', ('NUMBER', 1), 'EOF']),
            ("x %= 1", [('ID', 'x'), 'MOD_EQ', ('NUMBER', 1), 'EOF']),
        ]

        for (input, expected) in inputs:
            with self.subTest(input=input):
                self.assertEqual(list(simplify(tokenize(input))), expected)
Example #40
 def test_assignment(self):
     expect = [
         Token("LET", "LET", 1, 0),
         Token("ID", "a", 1, 4),
         Token("ASSIGN", "BE", 1, 6),
         Token("NUMBER", "25", 1, 9),
     ]
     tokens = tokenize("LET a BE 25")
     for (token, ex) in itertools.izip(tokens, expect):
         self.assertEqual(token.typ, ex.typ)
         self.assertEqual(token.value, ex.value)
         self.assertEqual(token.line, ex.line)
         self.assertEqual(token.column, ex.column)
Example #41
def parser(input_string: str) -> None:
    try:
        token_list: TokenList = deque(tokenize(input_string))
        parse(token_list)
        print(f'{input_string} is a correct and valid expression')
    except InvalidTokenException as e:
        print(e)
    except IncorrectTokenException as e:
        print(e)
    except ExcedingTokensException:
        print(f'{input_string} is too long to be a valid expression')
    except NoMoreTokensException:
        print(f'{input_string} is too short to be a valid expression')
Example #42
def test_if_else():
    cases = [
        ('if 1 {}', [IfElse(Num(1), [], None)]),
        ('if 1 { return 0; }', [IfElse(Num(1), [Return(Num(0))], None)]),
        ('if 1 {} else {}', [IfElse(Num(1), [], [])]),
        ('if 1 {} else if 0 {}',
         [IfElse(Num(1), [], [IfElse(Num(0), [], None)])]),
        ('if 1 {} else if 0 {} else {}',
         [IfElse(Num(1), [], [IfElse(Num(0), [], [])])]),
    ]
    for string, ast in cases:
        tokens = lexer.tokenize(string)
        assert bcparser.parse(tokens) == ast
Example #43
def main(filename):
    input = open(filename)

    parser = Parser()

    try:
        tree = parser.parse(tokenize(input))
        #print_tree(tree, parser.terminals)

        run(tree)

    except parser.ParseErrors, e:
        for token, expected in e.errors:
            print 'Found', token, 'when', expected, 'was expected'
Example #44
 def test_numbers(self):
     expect = [
         Token("NUMBER", "1",   1, 0),
         Token("NUMBER", "2",   1, 2),
         Token("NUMBER", "3.2", 1, 4),
         Token("NUMBER", "0.1", 1, 8),
         Token("NUMBER", "-5",  1, 12),
     ]
     tokens = tokenize("1 2 3.2 0.1 -5")
     for (token, ex) in itertools.izip(tokens, expect):
         self.assertEqual(token.typ, ex.typ)
         self.assertEqual(token.value, ex.value)
         self.assertEqual(token.line, ex.line)
         self.assertEqual(token.column, ex.column)
Example #45
def uebb_compile(filename):
    tokens = tuple(lexer.tokenize(filename))
    defs, state = Parser(*tokens).parse()
    gctx, errors = context_check(defs)

    if errors:
        for node, msg in errors:
            print msg, "on line", node.line, "column", node.column
                
    else:
        fh = open(filename[:-3] + ".ma", "w")
        for i in compile_program(gctx):
            fh.write(repr(i) + "\n")
        fh.close()
Example #46
 def test_escapes(self):
     r"""Test tokenizing strings with escapes.
     This is testing the string:
     " \" \\ \n \\t "
     without the spaces.
     """
     self.assertEqual(lexer.tokenize(r'"\"\\\n\\t"', ""), [
         Token(token_kinds.string,
               [ord('"'),
                ord("\\"),
                ord("\n"),
                ord("\\"),
                ord("t"), 0])
     ])
Example #47
def follow_includes(header_name: str) -> set:
    path = get_standard_path(header_name)
    if path is None:
        return set()

    if path not in header_to_symbol_cache:
        print(header_name, path)
        header_to_symbol_cache[path] = set()
        with open(os.path.join(path)) as file:
            tokens = list(lexer.tokenize(file.read()))
            headers = get_headers(tokens)
            for hdr in headers:
                header_to_symbol_cache[path].update(follow_includes(hdr))
    print(header_to_symbol_cache[path])
    return header_to_symbol_cache[path]
Example #48
    def eval(self, text):
        """Evaluates the specified text within this interpreter's
        environment.

        text - a string
        """
        tokens = lexer.tokenize(text)
        if not tokens:
            return None

        tree = parser.parse(tokens)

        if self.USE_TAIL_RECURSION:
            analyzers.markTailContexts(tree)

        return tree.eval(self.env)
Example #49
File: repl.py Project: scribu/scheme.py
    def completer(self, input, state):
        tokens = lexer.tokenize(input)

        symbol = tokens[-1]

        if not lexer.is_symbol(symbol):
            return None

        options = self.get_options(self.scope, symbol.name)

        if state >= len(options):
            return None

        tokens[-1] = options[state]

        return "".join(tokens)
Example #50
def test(filename, expected_output):

    print "### Interpreting", filename
    interpreted_ast_output = interpreter.interpret(os.path.join(EXAMPLES, filename))
    
    tokens = tuple(lexer.tokenize(os.path.join(EXAMPLES, filename)))
    defs, state = Parser(*tokens).parse()
    gctx, errors = context_check(defs)
    
    print "### Testing compiled instructions:"
    interpreted_instructions_output = uebb.interpret(coder.compile_program(gctx))
    
    if interpreted_ast_output != expected_output:
        raise RuntimeError("Interpreted output %d was incorrect, was expecting %d" % (interpreted_ast_output, expected_output))
    if interpreted_instructions_output != expected_output:
        raise RuntimeError("Compiled output %d was incorrect, was expecting %d" % (interpreted_instructions_output, expected_output))
    print
    print
Example #51
File: repl.py Project: scribu/scheme.py
    def start(self):
        stored_tokens = []

        while True:
            try:
                line = raw_input("scheme> ").strip()
            except EOFError:
                print
                break

            if not line:
                continue

            try:
                tokens = lexer.tokenize(line)
            except Exception as e:
                print(e)
                continue

            stored_tokens += tokens

            ast, balance = lexer.get_ast(stored_tokens)

            if balance > 0:
                continue
            elif balance < 0:
                print('Unexpected ")"')
                stored_tokens = []
                continue

            stored_tokens = []

            ast = lexer.expand_quotes(ast)
            ast = lexer.expand_define(ast)

            for expr in ast:
                print(self.scope.eval(expr))
Example #52
        elif code[i] == Opcode.HALT:
            print addr(i) + " HALT"

        else:
            print addr(i) + " ?? " + str(code[i])

        i += 1


if __name__ == "__main__":
    if len(sys.argv) > 1:
        print "opening file", sys.argv[1]
        with open(sys.argv[1], 'r') as f:
            prog = f.read()
    else:
        from samples import sample_prog as prog

    ast = parse(tokenize(prog))
    print "AST:"
    pprint.pprint(ast)

    (code, strings) = translate(ast)
    #print "\nCode:"
    #pprint.pprint(code)
    print "\nStrings:"
    pprint.pprint(strings)

    print "\nDisassembly:"
    disassemble(code)
Example #53
File: lang.py Project: leovt/kant
import sys
from pprint import pprint

tokens = lexer.tokenize('''
(a=b)(c=d);
a = (b = c);
a(b(c));
a(b)(c)(d);
f(1,2,3, 4);
def a:int = 4;
b = 4 + a;
if 6-5 == 4 and a == b {
hallo;
}
else
{
hello;
a + b-c*2;
/* kommentar */
}
def foo(a:int, b:int):int {
    return a + b;
}
7*8;
if 1 {
// kommentar
hello;}

''')


parser.init(tokens)
Example #54
def test_basic_operators():
    ops = ['+', '-', '*', '/', '^', '<', '>', '<=', '>=', '==', '!=']
    for op in ops:
        tokens = lexer.tokenize('1 {} 1'.format(op))
        assert bcparser.parse(tokens) == [BinOp(Num(1), op, Num(1))]
Example #55
def interpret(ins,mem,rpl_r=32,rpl_c=1,rpl_Q=10,rpl_W=0):
    cmd = ins[0]
    global program_input
    global last_input
    to_return = 0
    if cmd == '[':       # I only want for loops to pass through
        newmem = Memory()
        #newmem.instructions = copy.deepcopy(mem.instructions)
        interpret_list(ins[1],newmem,_rpl_Q=rpl_Q,_rpl_W=rpl_W)
        to_return = newmem.matrix
    elif cmd == '{':
        newmem = Memory()
        #newmem.instructions = copy.deepcopy(mem.instructions)
        newmem.matrix = copy.deepcopy(mem.matrix)
        interpret_list(ins[1],newmem,_rpl_Q=rpl_Q,_rpl_W=rpl_W)
        to_return = newmem.matrix

    if len(ins)>2: del ins[2]

    cpy = 0

    if cmd in SPECIAL_COMS:
        cpy = ins[1]
    else:
        cpy = [interpret(i,mem,rpl_r=rpl_r,rpl_c=rpl_c,rpl_Q=rpl_Q,rpl_W=rpl_W) for i in ins[1]]

    cpy = copy.deepcopy(cpy)

    ins.append(cpy)
    
    if cmd == '#':
        to_return = ins[2][0]
    elif cmd in MATHSET:
        to_return= mm.apply_math(ins[2][0],ins[2][1],MATHDICT[cmd])
    elif cmd in UNMATHSET:
        to_return= mm.apply_unmath(ins[2][0],UNMATHDICT[cmd])
    elif cmd in ADDSET:
        mem.matrix = uu.add(mem.matrix,as_matrix(ins[2][0]),cmd)
        to_return= mem.matrix
    elif cmd in SHIFTSET:
        mem.matrix = uu.shift(mem.matrix,as_float(ins[2][0]),cmd)
        to_return= mem.matrix
    elif cmd in OPSET:
        mem.matrix = uu.apply_op(mem.matrix,cmd)
        to_return= mem.matrix
    elif cmd == 'r':
        to_return= rpl_r
    elif cmd == 'c':
        to_return= rpl_c
    elif cmd == 'Q':
        to_return= rpl_Q
    elif cmd == 'W':
        to_return= rpl_W
    elif cmd == 'L':
        to_return = float(len(mem.matrix))
    elif cmd == 'l':
        to_return = float(len(mem.matrix[0]))
    elif cmd == 'm':
        rows = int(as_float(interpret(ins[2][1],mem,rpl_r=rpl_r,rpl_c=rpl_c,rpl_Q=rpl_Q,rpl_W=rpl_W)))
        cols = int(as_float(interpret(ins[2][2],mem,rpl_r=rpl_r,rpl_c=rpl_c,rpl_Q=rpl_Q,rpl_W=rpl_W)))
        mem.matrix = []
        for r in range(rows):
            row = []
            for c in range(cols):
                row.append(interpret(ins[2][0],mem,rpl_r=r,rpl_c=c,rpl_Q=rpl_Q,rpl_W=rpl_W))
            mem.matrix.append(row)
        to_return= mem.matrix
    elif cmd == 'F':
        rows = int(as_float(interpret(ins[2][1],mem,rpl_r=rpl_r,rpl_c=rpl_c,rpl_Q=rpl_Q,rpl_W=rpl_W)))
        cols = int(as_float(interpret(ins[2][2],mem,rpl_r=rpl_r,rpl_c=rpl_c,rpl_Q=rpl_Q,rpl_W=rpl_W)))
        for W in range(rows):
            for Q in range(cols):
                interpret(ins[2][0],mem,rpl_r=rpl_r,rpl_c=rpl_c,rpl_Q=Q,rpl_W=W)
        to_return= 0
    elif cmd == 'k':
        mem.matrix = as_matrix(ins[2][0])
        to_return= mem.matrix
    elif cmd == 's':
        _set(as_matrix(ins[2][0]),as_float(ins[2][1]),as_float(ins[2][2]),mem)
        to_return= as_matrix(ins[2][0])
    elif cmd == 'g':
        to_return= _get(as_float(ins[2][0]),as_float(ins[2][1]),mem)
    # jumps are unnecessary for now. Just use for loops
    # I plan to add them back in eventually,
    # but for now it's just too big of a hassle
    #elif cmd == 'j':
    #    to_return = 0
    #    _inslist = copy.deepcopy(mem.instructions[int(as_float(ins[2][0]))])
    #    _index = int(as_float(ins[2][1]))-1
    elif cmd == 'i':
        to_return = 0
        if as_float(interpret(ins[2][0],mem,rpl_r=rpl_r,rpl_c=rpl_c,rpl_Q=rpl_Q,rpl_W=rpl_W))!=0:
            to_return = ins[2][1]
        else:
            to_return = ins[2][2]
        to_return = interpret(to_return,mem,rpl_r=rpl_r,rpl_c=rpl_c,rpl_Q=rpl_Q,rpl_W=rpl_W)
    #no use for this, just use ascii codes
    #elif cmd == '"': 
    #    pass
    elif cmd == '\'':
        print(chr(int(as_float(ins[2][0]))),end="")
        to_return = 0
    elif cmd == 'y':
        if len(program_input)==0:
            last_input = 0
            to_return= 0
        else:
            last_input = ord(program_input[0])
            program_input = program_input[1:]
            to_return= last_input
    elif cmd == 'n':
        last_input = 0
        if len(program_input)>0:
            newinput = lexer.tokenize([c for c in program_input])
            index=0
            for num in newinput:
                try:
                    nnum = float(num)
                    last_input = nnum
                    index = newinput.index(num)
                    break
                except (TypeError, ValueError):
                    continue
            if index>=len(newinput):
                program_input=''
            else:
                program_input = program_input[program_input.index(newinput[index])+len(newinput[index]):]
        to_return= last_input
    elif cmd == 'p':
        to_return= mm.matrix_product(as_matrix(ins[2][0]))
    elif cmd == 'd':
        to_return= mm.matrix_sum(as_matrix(ins[2][0]))
    elif cmd == 'P':
        to_return= mm.dot_product(as_matrix(ins[2][0]),as_matrix(ins[2][1]))
    elif cmd == 'D':
        to_return= mm.dot_sum(as_matrix(ins[2][0]),as_matrix(ins[2][1]))
    elif cmd=='q':
        mem.matrix=uu.cut_from_tl(mem.matrix,as_float(ins[2][0]),as_float(ins[2][1]))
        to_return= mem.matrix
    elif cmd=='z':
        mem.matrix=uu.cut_from_br(mem.matrix,as_float(ins[2][0]),as_float(ins[2][1]))
        to_return= mem.matrix
    elif cmd == '?':
        to_return= random.uniform(as_float(ins[2][0]),as_float(ins[2][1]))
    elif cmd == '<':
        ins[2] = [[interpret_list(col,mem,_rpl_Q=rpl_Q,_rpl_W=rpl_W) for col in row] for row in ins[2]]
        to_return = uu.pad(ins[2])
    elif cmd == 'N':
        to_return = last_input
    elif cmd == 'S':
        print(str(ins[2][0]))
        to_return = 0
    elif cmd == 'C':
        to_return = uu.contains(as_matrix(ins[2][0]), as_float(ins[2][1]))

    return to_return
Example #56
        pprint.pprint({
            "IP": self.IP,
            "STACK": self.STACK,
            "NAME_REG": self.NAME_REG,
            "VARs": self.VARS,
            "IP_STACK": self.IP_STACK,
            "VAR_STACK": self.VAR_STACK,
        })

    def Run(self):
        if(self.debugger):
            self.PrintState()
        while not self.halted:
            self.Step()
            if(self.debugger):
                self.PrintState()


if __name__ == "__main__":
    from samples import sample_prog
    from lexer import tokenize
    from parser import parse
    from translator import translate

    (code, strings) = translate(parse(tokenize(sample_prog)))

    vm = BasicVM()
    vm.Load(code, strings)
    #vm.SetDebugger(True)
    vm.Run()
Example #57
def lex_into_list(str):
	return [token for token in lexer.tokenize(str)]
Example #58
        return PExpr(expr)


def parse(tokens):
    return Parser(tokens).parse()


if __name__ == "__main__":
    if len(sys.argv) > 1:
        print "opening file", sys.argv[1]
        from lexer import tokenize

        with open(sys.argv[1], "r") as f:
            statements = f.read()
            ast = parse(tokenize(statements))
    else:
        print "using baked-in file"
        ast = parse(
            [
                Token("CLEAR", "CLEAR", 1, 0),
                Token("ID", "top", 3, 0),
                Token("COLON", ":", 3, 3),
                Token("NEWLINE", "\n", 3, 4),
                Token("LET", "LET", 4, 0),
                Token("ID", "a", 4, 4),
                Token("ASSIGN", "BE", 4, 6),
                Token("NUMBER", "25", 4, 9),
                Token("NEWLINE", "\n", 4, 11),
                Token("PRINT", "PRINT", 5, 0),
                Token("STRING", "Hello world", 5, 6),
Example #59
    def pop(state, *token_classes):
        """
        Check whether the head of tokens is an instance of any of the required classes; return it if it is, otherwise raise an exception.
        """
        for token_class in token_classes:
            if isinstance(state.head, token_class):
                return state.head, Parser(*state.tail)

        # No token class was matched, raise exception.
        # Note that as parsing is very strict, it does not make much sense to
        # gather all error messages, because the subsequent ones would not be meaningful.
        raise ParseError("Got " + repr(state.head) + " of class " + state.head.__class__.__name__ + ", was expecting " + " or ".join([tc.__name__ for tc in token_classes]), state.head)

        
    def peek(state, token_class):
        return isinstance(state.head, token_class)


    def skip(state, *token_classes):
        token, state = state.pop(*token_classes)
        return state

if __name__ == "__main__":
    filename, = sys.argv[1:]
    tokens = tuple(lexer.tokenize(filename))
    defs, state = Parser(*tokens).parse()
    print "Parsed defs:"
    for d in defs:
        print d

Example #60
import pretty_printer

import sys
import lexer
import parser
#import code_generator
#import interpreter

filename = sys.argv[1]
tokens = lexer.tokenize(filename)
#print tokens
goal = parser.parse(tokens)
pretty_printer.pretty_print(goal)
code_generator.generate(goal)
assembler.assemble(asm_file)
linker.link(o_file)