Example #1
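# Compiles a single .jack file, or every .jack file in a directory, writing the compiled XML next to each source file.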
def main(argv):
    inputStr = argv[0]
    try:
        if os.path.isdir(inputStr):
            files = [
                file for file in os.listdir(inputStr) if file.endswith('.jack')
            ]
            for file in files:
                tokenizedArray = tokenizeFile(inputStr + '/' + file)
                compilerObj = Compiler.Compiler(
                    Compiler.handleTabsArray(tokenizedArray))
                compilerObj.compileEngine()
                outputFileName = file.replace(".jack", ".xml")
                outputStr = inputStr + '/' + outputFileName
                # writeArrayToFile(tokenizedArray, outputFileName, True)
                writeArrayToFile(compilerObj.compiledArray, outputStr, False)
        else:
            tokenizedArray = tokenizeFile(inputStr)
            compilerObj = Compiler.Compiler(
                Compiler.handleTabsArray(tokenizedArray))
            compilerObj.compileEngine()

            outputFileName = inputStr.replace(".jack", ".xml")
            # writeArrayToFile(tokenizedArray, outputFileName, True)
            writeArrayToFile(compilerObj.compiledArray, outputFileName, False)
    except TypeError:
        print("I Love Nand")
Example #2
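    # Wires up the helper objects, creating default Compiler, Evaluator, and MultilineParser instances when none are supplied.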
    def __init__(self,
                 sparql=None,
                 compiler=None,
                 evaluator=None,
                 multiline_parser=None,
                 options=['time'],
                 debug=False,
                 load_translations=True):
        self.sparql = sparql
        if self.sparql:
            self.n = sparql.n
        else:
            self.n = Namespaces.Namespaces()
        # self.translator = translator
        if compiler:
            self.compiler = compiler
        else:
            self.compiler = Compiler.Compiler(self.n)
        if evaluator is None:
            evaluator = Evaluator.Evaluator(self.n)
        self.evaluator = evaluator
        self.parser = Parser.Parser(self.n)
        self.urigen = UniqueURIGenerator()
        if multiline_parser is None:
            multiline_parser = MultilineParser.MultilineParser(self.n, self)
        self.multiline_parser = multiline_parser
        self.options = options
        self.cum_comp_time = 0
        self.cum_eval_time = 0
        if not debug:
            self.compiler.debug_off()

        if load_translations:
            from loadTranslations import loadTranslations
            loadTranslations(self)
Example #3
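 # Sets up the emulated processor state: an empty instruction stream, a Compiler bound to it, memory, and the pin map.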
 def __init__(self):
     self.instructionstream = []
     self.compiler = Compiler.Compiler(self.instructionstream)
     self.instructionStreamOffset = 0
     self.pcTest = 0
     self.pc = 0
     self.memory = Memory.Memory()
     self.pins = {
         "Vdd": 0,
         "D7": 0,
         "D6": 0,
         "D5": 0,
         "D4": 0,
         "D3": 0,
         "D2": 0,
         "D1": 0,
         "D0": 0,
         "Vcc": 0,
         "S2": 0,
         "S1": 0,
         "S0": 0,
         "Sync": 0,
         "Phase 2": 0,
         "Phase 1": 0,
         "Ready": 0,
         "Interrupt": 0,
     }
Example #4
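 # Loads and compiles the module's .lam source if it exists; syntax and semantic errors are reported and re-raised.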
 def __init__(self, parser, name):
     Node.__init__(self, parser)
     self.name = name
     try:
         p = Parser.Parser(name + '.lam')
         compiler = Compiler.Compiler(p)
         compiler.compile()
     except IOError:
         pass  # No .lam module found; Python modules will be sought by the Python code
     except (SyntacticError, SemanticError) as se:
         print "In module " + name + ": "
         raise se
Example #5
 def SetCompilerByName(self, cmpl, name):
     '''Set a compiler by name, e.g. after its configuration has been modified.
     cmpl is a Compiler instance or a dict.'''
     if isinstance(cmpl, dict):
         cmpl = Compiler.Compiler(cmpl)
     dstIdx = -1
     for idx, elm in enumerate(self.compilers):
         if elm["name"] == name:
             dstIdx = idx
             break
     if dstIdx != -1:
         self.compilers[dstIdx] = cmpl.ToDict()
Example #6
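# Compiles every .h file in the directory and writes the output to a matching .m file.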
def compile_dir(dir_path):
    compiler = Compiler()
    for filePath in os.listdir(dir_path):
        filePath = dir_path + '/' + filePath
        if not filePath.endswith('.h'):
            continue
        print(filePath)
        contents = compiler.compile(filePath)
        print(contents)
        output_file = filePath.replace('.h', '.m')
        print(output_file)
        with open(output_file, 'w+') as fw:
            fw.write(contents)
Example #7
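# Runs the data import and compilation, then copies the resulting CSV into HDFS and hands it to HwxConnection.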
def init():
    index()
    config = load_configuration()
    processor = Putil(config)
    processor.run_data_import()
    comp = Compiler(processor.data, config)
    comp.run_compilation()
    newFilenameSeventhTable = "DataAnalytics"
    freshfilename = newFilenameSeventhTable+'.csv'
    tablenew = "DataAnalytics"
    pathNew = './CsvData/'+freshfilename
    (ret, out, err) = run_cmd(['hdfs', 'dfs', '-rm', '/tmp/Wheeltrue/CsvData/'+newFilenameSeventhTable+'.csv'])
    (ret, out, err) = run_cmd(['hdfs', 'dfs', '-copyFromLocal', pathNew, '/tmp/Wheeltrue/CsvData'])
    if err != '':
        print "Script errored out while copying data into HWX"
        print "Original error message: " + err
        sys.exit(1)
    HwxConnection(tablenew , pathNew)
Example #8
 def GetCompilerByName(self, name):
     '''Returns a Compiler instance.'''
     for i in self.compilers:
         if i["name"] == name:
             return Compiler.Compiler(i)
     return None
Example #9
#!/usr/bin/env python

import sys

from Compiler import *
from Earley_Parser import *
from VirtualMachine import *
import KeywordSub


def separate_a_section():
    print "="*100

if __name__ == "__main__":
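    # Tokenize the source, substitute keywords, parse with the Earley parser, compile, and execute on the VM.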
    c, vm = Compiler(), VM()
    source, rules = Tokenizer.get_list(), Tokenizer.get_rules()
    s = EarleyParser(rules).parse(KeywordSub.keywords_substitution(source))
    if s is None:
        print
        print "Syntax error."
        sys.exit(1)
    # separate_a_section()
    # PrintTree(s)
    # separate_a_section()
    c.Compile(s)
    c.program += [EXIT]
    separate_a_section()
    vm.Execute(c.program)
    separate_a_section()
    print 'Values : ' + str(vm.var_values)
Example #10

def printNode(node, deep):
    if node.kind != Parser.EMPTY:
        print("-" * deep + Parser.TYPES[node.kind])
        if node.operand1:
            printNode(node.operand1, deep + 1)
        if node.operand2:
            printNode(node.operand2, deep + 1)
        if node.operand3:
            printNode(node.operand3, deep + 1)


prog = parser.parse()
printNode(prog, 0)
compiler = Compiler()
program = compiler.compile(prog)

i = 0
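# Walk the compiled program: opcodes listed in VMTYPESWithARG are followed by one argument slot.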
while i < len(program):
    operation = VMTYPES[program[i]]
    if operation in VMTYPESWithARG:
        print(str(i) + ": " + operation)
        i += 1
        print(str(i) + ": " + str(program[i]))
        i += 1
    else:
        print(str(i) + ": " + operation)
        i += 1

vm = VM()
Example #11
import math
import sys
import Parser as prs
import PrePro as pp
import SymbolTable as st
import Compiler as cp

with open(sys.argv[1], "r") as file:
    code = file.read()
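# Preprocess the source, parse it into an AST, then evaluate the tree against the symbol table, emitting code through the compiler.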
filtered = pp.PrePro.filter(code)
symbolTable = st.SymbolTable()
compiler = cp.Compiler()
node = prs.Parser.run(filtered)
node.Evaluate(symbolTable, compiler)
compiler.flush()
Example #12
# Generated from Nmod.g4 by ANTLR 4.7
from antlr4 import *
if __name__ is not None and "." in __name__:
    from .NmodParser import NmodParser
else:
    from NmodParser import NmodParser

from Compiler import *
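# Module-level Compiler instance available to the listener callbacks below.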
c = Compiler()


# This class defines a complete listener for a parse tree produced by NmodParser.
class NmodListener(ParseTreeListener):

    # Enter a parse tree produced by NmodParser#program.
    def enterProgram(self, ctx:NmodParser.ProgramContext):
        pass

    # Exit a parse tree produced by NmodParser#program.
    def exitProgram(self, ctx:NmodParser.ProgramContext):
        pass


    # Enter a parse tree produced by NmodParser#f_type.
    def enterF_type(self, ctx:NmodParser.F_typeContext):
        pass

    # Exit a parse tree produced by NmodParser#f_type.
    def exitF_type(self, ctx:NmodParser.F_typeContext):
        pass
Example #13
import tkinter as tk
from PIL import ImageTk
from PIL import Image
from ScrollFrame import VerticalScrolledFrame
import csv
from Histogram import *
from Compiler import *

sheetData = Compiler()

default_logo = 'first_logo.jpg'
default_image = 'kachow.gif'

''' Reads and creates the list of dictionaries of the teams info '''
inf = []
with open('CSVTest.csv') as csvfile:
    reader = csv.DictReader(csvfile, delimiter='/')
    for column in reader:
        file = {'Team':column['Team'], 'Name':column['Name'], 'Drive':column['Drive'],
                'Chassis':column['Chassis'], 'Mechs':column['Mechs'], 'MechD':column['MechD'],
                'Autonomous Gear':column['aGear'], 'Autonomous Gear Positon':column['aGearPos'],
                'Autonomous Position Start':column['aPosStart'], 'Autonomous Position End':column['aPosEnd'],
                'Image':column['Image'], 'Logo':column['Logo'], 'aGearHist':None, 'tGearHist':None, 'presHist':None}
        inf.append(file)
        
    for rob in inf:
        rob['Mechs'] = rob['Mechs'].split(',')
        rob['MechD'] = rob['MechD'].split(',')

for rob in inf:
    # print(rob['Team'])
    pass
Example #14
import os
import sys

sys.path.append('./Imports')
import Putil
from Putil import Putil
import Compiler
from Compiler import Compiler
#from WeeklyUtilization import WeeklyUtilization

import json


def load_configuration():
    '''Simply loads the json register lookup file'''
    with open('register-lookup.json', 'r') as inputFile:
        config = json.load(inputFile)
    return config


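# Load the register lookup configuration, import the raw data, then run the compilation pass.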
config = load_configuration()
processor = Putil(config)
processor.run_data_import()

comp = Compiler(processor.data, config)
comp.run_compilation()

## First section looking at a week by week breakdown of wheel true utilization
#analyzer = WeeklyUtilization(processor.data)
#analyzer.run_utilization_study()
Example #15
#!/usr/bin/python -tt

import sys
from Compiler import *

DEBUG = False

for arg in sys.argv:
    if arg == "-d":
        DEBUG = True

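# Read the whole document from stdin and compile it in one pass.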
doc = sys.stdin.read()

compiler = Compiler(DEBUG)
result = compiler.compile(doc)
print(result)