Example #1
File: tests.py  Project: Javapyc/Javapyc
                def runTest(self):
                    scanner = lexer.MiniJavaScanner()
                    with open(p) as f:
                        s = f.read()
                    tokens = scanner.tokenize(s)
                    javaParser = parser.ProgramParser()
                    tree = javaParser.parse(tokens)
                    typechecker.typecheck(tree)
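                    # optionally run the requested number of optimization passes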
                    if optimize:
                        for roundNum in range(optimize):
                            tree.optimize()
                    binpath = os.path.join('testbins', name + '.pyc')
                    codegen.codegen(binpath, tree, False)
                    with TempFile() as fout:
                        # run the compiled program, capturing its output
                        proc = subprocess.Popen(('python3', binpath),
                                                stdout=fout.f,
                                                stderr=subprocess.STDOUT)

                        # give the process up to three seconds to finish
                        failTime = time.time() + 3.0
                        while proc.poll() is None:
                            time.sleep(0.01)
                            if time.time() > failTime:
                                proc.kill()
                                raise Exception('Compiled program took too long to execute')

                        res = proc.wait()
                        self.assertEqual(0, res)
                        fout.flush()
                        self.diff(expected, fout.name)
Example #2
def main(src):
    try:  # check that the input file actually exists
        fo = open(src, "r")
        fo.close()
    except IOError:
        print("The given file does not exist.")
        return False

    with open(src) as file:
        exp = file.read()

    # turn the input string into a list of lexical tokens
    ul = scanner(exp)

    # convert that token list into the postfix form of the expression
    postfix = parser(ul)
    if postfix[0]:
        print("Valid string\n")  # comment this out for readability when running tests

        # write the expression, in the form of a stack, to a file
        codegen(postfix[1])
        os.chmod("a.out", 0o744)
        return True
    else:
        print("Syntax error at position {}: {}.\nPart of the expression "
              "is missing\n".format(postfix[1], exp[postfix[1]-1]))
        return False
Example #3
def compile_bytecode(name):
    with open(name, 'r') as f:
        text = f.read()
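    # classic pipeline: lex the source, parse to an AST, then generate code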
    tokens = tokenize(text)
    ast = parse(tokens)
    fns, stack = codegen(ast)
    return fns, stack
Example #4
    def actionExport(self):
        # Qt5's getSaveFileName returns a (filename, filter) tuple; Qt4 returns only the name
        if using_qt5:
            export_as = QFileDialog.getSaveFileName(self, "Export as...")[0]
        else:
            export_as = QFileDialog.getSaveFileName(self, "Export as...")
        create_code = codegen.codegen(self, "mynode", self.blocks, self.resolution)
        if export_as != "":
            create_code.writeToFile(export_as)
Example #5
def krd_to_java(filename):
    '''
    Use the compiler object to generate a Java source file.
    '''
    pgm, symtab = MyCompiler().compile(filename)
    java_filename = pgm.get_name() + "App.java"
    code = codegen(pgm, symtab, 0, kparser.wnum)
    with open(java_filename, "w") as java_file:
        java_file.write(code)
Example #6
File: pyglow.py  Project: mcoted/pyglow
def compile(grammar):
    grammar = grammar.replace('\r\n', '\n')
    grammar += '\n'
    prods = grammar.split('\n')
    prods_tokens = []
    for prod in prods:
        prod = prod.strip()
        if len(prod) == 0:
            continue
        tokens = lexer.lexer(prod)
        if len(tokens) > 0:
            prods_tokens.append(list(tokens))
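    # parse the collected productions into a tree and generate code from it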
    tree = parse.parse(prods_tokens)
    code = codegen.codegen(tree)
    return code
Example #7
File: testmain.py  Project: xu3kev/bril
from regalloc import regalloc
from codegen import codegen
import json
import argparse
import sys

REG_PREFIX = "r_"

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='register allocation for bril json format')
    parser.add_argument(
        "--stats",
        action="store_true",
        help="print var to reg mapping instead of code generation")
    parser.add_argument("--num",
                        type=int,
                        default=3,
                        help="number of registers")
    args = parser.parse_args()

    bril = json.load(sys.stdin)

    regs = [REG_PREFIX + '%02d' % (i + 1) for i in range(args.num)]

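    # allocate registers for each function, then print its code using the var-to-reg mapping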
    for func in bril['functions']:
        regmap, colored, spilled = regalloc(func['instrs'], regs)
        print('%s {' % func['name'])
        codegen(func['instrs'], regmap)
        print('}')
Example #8
    def gen_java(self, filename):
        '''
        Function to call the code generation on the AST obtained after compiling.
        '''
        return codegen(self.compile(filename))
Example #9
def main():
    args = getArguments()
    if not args:
        sys.exit(1)
    if len(args.files) > 1:
        print("Only one input file is supported")
        sys.exit(1)

    outfile = args.out_file + '.pyc'
    verbose = args.verbose

    dumpbin = args.dump_binary

    if args.pedantic:
        settings.MODE_PEDANTIC = True

    if args.no_fastgen:
        settings.MODE_FASTGEN = False

    settings.VERBOSITY = verbose

    import lexer
    import parser
    import typechecker
    import optimizer
    import codegen

    for inputFile in args.files:
        with inputFile as f:
            s = f.read()

            # Lexical Analysis
            scanner = lexer.MiniJavaScanner()
            tokens = scanner.tokenize(s)
            if args.phase == 'lex':
                lexer.dump(tokens, sys.stdout)
                break

            # Parsing
            p = parser.ProgramParser()
            tree = p.parse(tokens)
            if args.phase == 'parse':
                parser.dump(tree, sys.stdout)
                break
            
            # Typecheck Parse Tree
            try:
                typechecker.typecheck(tree)
            except typechecker.TypecheckException as ex:
                print('Typecheck failed', file=sys.stderr)
                if verbose:
                    print(ex, file=sys.stderr)
                    if verbose > 1:
                        raise ex
                sys.exit(1)
            if args.phase == 'typecheck':
                print('Looks good')
                break
            
            # Optimization
            if args.phase == 'optimize':
                parser.dump(tree, sys.stdout)
                print()

            if args.phase == 'optimize' or args.optimize:
                if args.phase == 'optimize' and args.optimize == 0:
                    args.optimize = 1

                print("running", args.optimize, "rounds of optimize()")

                for roundNum in range(args.optimize):
                    tree.optimize()

            if args.phase == 'optimize':
                parser.dump(tree, sys.stdout)
                break

            # Generate Code
            codegen.codegen(outfile, tree, dumpbin)
            if args.phase == 'codegen':
                break


            if args.phase == 'run':
                import importlib
                mod = importlib.import_module(args.out_file)
                mod.main()
Example #10
    try:
        listing = open(name + '.lis', 'w')
    except OSError:
        print("Could not create listing file")
        exit(-1)

    try:
        dest = open(name + '.s', 'w')
    except OSError:
        print("Could not create assembly file")
        exit(-1)
    
    symtab = MasterSymbolTable()
    
    # this is a series of closures used to group functions
    # which have some global state. This is done to
    # avoid sharing globals between modules, at least as much
    # as possible.
    error, warning = error_handlers(symtab, listing, dest)
    print_listing = lister(listing) # get the listing function
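    # codegen returns closures: instruction emitters (emit, emit_epilog) plus load/store helpers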
    emit, emit_epilog, load, store = codegen(symtab, dest)
    get = Token.Tokenizer(CharBuffer(src), warning)
    
    parse(get, dest, print_listing, symtab, warning, error, emit, load, store)
    listing.write('\n\n' + str(symtab))
    emit_epilog()
    listing.close()
    dest.close()
    print()

Example #11
    A_shape = [100, 200]
    B_shape = [100, 200]
    C_shape = [100, 200]
    Z_shape = [100, 200]

    #  A = arrays.isl_array(isl_context, "A", A_shape)
    #  B = arrays.isl_array(isl_context, "B", B_shape)
    #  C = arrays.isl_array(isl_context, "C", C_shape)
    #  Z = arrays.isl_array(isl_context, "Z", Z_shape)

    statements = {}
    stmt = operations.add(isl_context, ("C", C_shape, [0, 0]),
                          ("A", A_shape, [0, 1]), ("B", B_shape, [0, 0]))
    name = statement.get_name(stmt)
    statements[name] = stmt
    stmt = operations.add(isl_context, ("Z", Z_shape, [0, 0]),
                          ("C", C_shape, [0, 0]), ("B", B_shape, [0, 0]))
    name = statement.get_name(stmt)
    statements[name] = stmt

    for stmt in statements.values():
        print(stmt)

    schedule = schedule.build_schedule(isl_context, statements)
    print(schedule)

    ast = codegen.codegen(isl_context, schedule, statements)

    print()
    print(emitter.emit_source(statements, ast))
Example #12
#!/usr/bin/env python3

# jwbind.py is the entry python script to drive the backend
# part including parsing class description and code generation.

from parser import parse
from wparser import parseWasm
from codegen import codegen

import sys

#  if len(sys.argv) < 2:
#      print('Usage:\tjwbind <file>')
#      sys.exit(1)

raw = parseWasm(sys.argv[1])
desc = parse(raw)
codegen(desc, sys.argv[1])
Example #13
from tokenizer import TokenBuffer, tokenize
from parser import parse
from codegen import codegen
from llvm.core import *
from llvm.ee import *

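# tokenize and parse the source, generate an LLVM module, then run main() in the interpreter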
with open("sample.cy", "r") as f:
    tokens = TokenBuffer(tokenize(f.read()))
ast = parse(tokens)
module = codegen(ast)
ex = ExecutionEngine.new(module, force_interpreter=True)
main = module.get_function_named('main')
result = ex.run_function(main, [])
print(result.as_real(Type.double()))