Example #1
0
def isValid(rulesText, programText):
    """Return True if programText parses under the grammar in rulesText.

    Builds an LR parser from the grammar and attempts a parse; a parse
    failure is reported as False rather than propagated.
    """
    rules = parseRules(rulesText)
    table = ParseTable(rules)
    parser = LRParser(rules, table.actionTable(), table.gotoTable())
    try:
        # Only the parse itself is guarded; table-construction errors
        # indicate a bad grammar and should propagate to the caller.
        parser.parse(programText)
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to ordinary errors.  The unused
        # `derivation` binding was dropped as well.
        return False
    return True
Example #2
0
def constructParser(rulesText, isVerbose=False):
    """Build an LRParser from a textual grammar description.

    rulesText -- the grammar rules to parse.
    isVerbose -- when True, log each construction stage to stdout.
    """
    def log(msg=""):
        # Progress output, emitted only in verbose mode.
        # `print msg` was Python-2-only syntax; `print(msg)` behaves
        # identically for a single argument and is valid in Python 3.
        if isVerbose:
            print(msg)

    log("Parsing rules...")
    rules = parseRules(rulesText)
    log("\n".join(map(str, rules)))
    log()

    log("Constructing parse tables...")
    table = ParseTable(rules)
    log(table)

    return LRParser(rules, table.actionTable(), table.gotoTable())
Example #3
0
File: main.py  Project: kms70847/parser
def slurp(filename):
    """Return the entire contents of *filename* as a string.

    Uses a context manager so the handle is closed even if read() raises
    (the original leaked the handle on error, and shadowed the `file`
    builtin).
    """
    with open(filename) as f:
        return f.read()


def tokenizeProgram(data):
    """Strip comments and insignificant whitespace from program source.

    Removes everything from an unquoted '#' to the end of its line, and
    removes spaces, tabs, and newlines occurring outside double-quoted
    string literals.  Spaces and tabs inside strings are preserved;
    newlines are always dropped (matching the original line-join).

    Bug fix: the original stripped comments line-by-line BEFORE tracking
    string state, so a '#' inside a string literal truncated the line.
    This single-pass version only starts a comment when not in a string.
    """
    characters = []
    inString = False
    inComment = False
    for char in data:
        if char == "\n":
            # Newlines terminate comments and are never emitted.
            inComment = False
            continue
        if inComment:
            continue
        if char == "\"":
            inString = not inString
        elif char == "#" and not inString:
            inComment = True
            continue
        if char in (" ", "\t") and not inString:
            continue
        characters.append(char)
    return "".join(characters)


# Driver: load the grammar, preprocess the test program, then parse
# and interpret it.
rulesText = slurp("language.txt")
rules = parseRules(rulesText)
# tokenizeProgram strips comments and whitespace outside string literals.
programText = tokenizeProgram(slurp("test.k"))

parser = constructParser(rulesText)
rightDerivation = parser.parse(programText)

#print "result of {}:".format(programText)
# Evaluate the derivation produced by the parse.
interpret(rightDerivation, rules)
Example #4
0
def slurp(filename):
    file = open(filename)
    rulesText = file.read()
    file.close()
    return rulesText

def tokenizeProgram(data):
    """Strip comments and insignificant whitespace from program source.

    Removes everything from an unquoted '#' to the end of its line, and
    removes spaces, tabs, and newlines occurring outside double-quoted
    string literals.  Spaces and tabs inside strings are preserved;
    newlines are always dropped (matching the original line-join).

    Bug fix: the original stripped comments line-by-line BEFORE tracking
    string state, so a '#' inside a string literal truncated the line.
    This single-pass version only starts a comment when not in a string.
    """
    characters = []
    inString = False
    inComment = False
    for char in data:
        if char == "\n":
            # Newlines terminate comments and are never emitted.
            inComment = False
            continue
        if inComment:
            continue
        if char == "\"":
            inString = not inString
        elif char == "#" and not inString:
            inComment = True
            continue
        if char in (" ", "\t") and not inString:
            continue
        characters.append(char)
    return "".join(characters)

# Driver: load the grammar, preprocess the test program, then parse
# and interpret it.
rulesText = slurp("language.txt")
rules = parseRules(rulesText)
# tokenizeProgram strips comments and whitespace outside string literals.
programText = tokenizeProgram(slurp("test.k"))



parser = constructParser(rulesText)
rightDerivation = parser.parse(programText)

#print "result of {}:".format(programText)
# Evaluate the derivation produced by the parse.
interpret(rightDerivation, rules)