def createGrammar(rules, symbols, markers, vars):
    if not rules:
        print('No rules defined')
        raise Exception('No rules defined')
    if not markers and not vars and not symbols:
        return Grammar(rules)
    if symbols and not vars and not markers:
        return Grammar(rules, symbols)
    if symbols and markers and not vars:
        return Grammar(rules, symbols, markers)
    if symbols and markers and vars:
        return Grammar(rules, symbols, markers, vars)
    if not symbols and markers and not vars:
        return Grammar(rules, 'abcdefghijklmnopqrstuvwxyz0123456789', markers)
    if not symbols and markers and vars:
        return Grammar(rules, 'abcdefghijklmnopqrstuvwxyz0123456789', markers, vars)
    if not symbols and not markers and vars:
        return Grammar(rules, 'abcdefghijklmnopqrstuvwxyz0123456789',
                       [u'\u03b1', u'\u03b2', u'\u03b3', u'\u03b4'])
    if not markers and symbols and vars:
        return Grammar(rules, symbols,
                       [u'\u03b1', u'\u03b2', u'\u03b3', u'\u03b4'], vars)
    raise Exception('No grammar type coincidence')
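# A minimal usage sketch for createGrammar (not part of the original code).
# The rule strings below are hypothetical; the real Grammar class defines the
# actual rule format. Passing None for symbols/markers/vars falls back to the
# defaults hardcoded inside createGrammar.
example_rules = ['S>aA', 'A>b']                        # assumed rule format
g_default = createGrammar(example_rules, None, None, None)
g_custom = createGrammar(example_rules, 'ab', ['#'], [u'\u03b1'])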
def __init__(self, fileName):
    self.__grammar = Grammar(fileName)
    self.__firstSet = {}
    self.__followSet = {}
    self.generate_first_set()
    self.__parsing_table = {}
    print(self.__grammar.get_productions())
def __init__(self, grammar=Grammar()):
    self.grammar = grammar
    self.First = {}
    self.Follow = {}
    self.Select = {}
    self.Table = pd.DataFrame()
    self.terminals = []
def ReadFile(path):
    print("Reading " + path)
    file = open(path, "r")
    lines = file.readlines()
    file.close()
    # Strip the trailing newline from every line
    for i in range(len(lines)):
        lines[i] = lines[i].replace("\n", "")
    # Non-terminal symbols alphabet
    not_Terminal = lines[0].split(",")
    # Terminal symbols alphabet
    terminal = lines[1].split(",")
    # Starting non-terminal symbol
    start = lines[2]
    # Productions
    productions = []
    for i in range(3, len(lines)):
        temp = lines[i].split("->")
        productions.append((temp[0], temp[1]))
    return Grammar(not_Terminal, terminal, Node(start, None), productions)
def main():
    g = Grammar()
    read_grammar_from_file(g)
    print(g.print_grammar())

    f = compute_first(g)
    print(f.get_index(), " idx")
    print(f.print_first())

    l = compute_follow(g, f)
    print(l.get_index(), " idx")
    print(l.print_follow())

    table = compute_parse_table(g, f, l)
    print(table.print_table())

    if table.check_ll1():
        print("Grammar is LL(1)")
        print("PRODUCTION STRING: ")
        w = "a*(a+a)"
        parse_sequence(w, table, g.get_start_symbol())
    else:
        print("Grammar is not LL(1)...")
def main():
    application_path = ""
    if getattr(sys, 'frozen', False):
        application_path = os.path.dirname(sys.executable)
    elif __file__:
        application_path = os.path.dirname(__file__)
    filepath = os.path.join(application_path, 'input.txt')

    # TODO 1: read the grammar from a file; it must contain no left recursion
    G = Grammar()
    G.read_from_file(filepath)

    # TODO 2: build the predictive parser, load the grammar, and print it
    predictive_parser = Predictive_Parser(grammar=G)
    predictive_parser.grammar.print_grammar()

    # TODO 3: compute the First, Follow, and Select sets and check whether the grammar is LL(1)
    predictive_parser.judgeLL1()

    # TODO 4: build and print the predictive parsing table
    predictive_parser.cal_table()

    # TODO 5: run predictive parsing on input strings
    predictive_parser.judge('eadeaa#')
    predictive_parser.judge('edeaebd#')
    predictive_parser.judge('edeaeaadabacae#')
def transformGrammar(self, afd, string):
    non_terminals = afd.states
    terminals = afd.alphabet
    initial_nt = afd.initialState
    productions = afd.transitions
    msg = ""

    # Add an epsilon production for every acceptance state
    for item in afd.getAcceptanceStates():
        prod = {'fS': item, 't': 'epsilon', 'lS': ""}
        productions.append(prod)

    for item in productions:
        item["String"] = f"{item['fS']}>{item['t']} {item['lS']}"

    grammar = Grammar(afd.getName())
    for nt in non_terminals:
        grammar.setNonTerminals(nt)
    for t in terminals:
        grammar.setTerminals(t)
    grammar.setInitialNT(initial_nt)
    for p in productions:
        grammar.setProductions(p['String'])

    grammarExtended = grammar.evaluateString(string)
    msg = f"{grammarExtended}"
    return msg
def testCreate1(self):
    P = Parser()
    G = Grammar()
    G.readGrammar('gram1.txt')
    P.setGrammar(G)
    self.assertEqual(P._non_terms, set(["S", "A"]))
    self.assertEqual(P._terms, set(["0", "1", "$"]))
def main():
    parser = argparse.ArgumentParser(
        description='Arguments for the input file and word')
    parser.add_argument('-f', '--filename', help='Input filename', required=False)
    parser.add_argument('-w', '--word', help='Input word', required=False)
    args = parser.parse_args()

    if args.filename:
        grammar_path = args.filename
    else:
        grammar_path = 'gramatica_exemplos/gramatica_exemplo_loop.txt'

    with open(grammar_path, 'r') as gf:
        grammar = gf.readline().rstrip()

    g = Grammar(grammar)
    ehValido = g.validateGrammar()

    if args.word:
        word = args.word
    else:
        word = input('Enter the word to validate: ')
    g.recognize(word)

    if ehValido:
        dfa = Automata(start_state=g.startSymbol)
        dfa.convertGrammar(g)
        dfa.convertER()
        print('The generated regular expression is: ')
        print(dfa.ER)
def __init__(self):
    self.__grammar = Grammar("grammar2")
    self.__LL1Table = {}
    self.__first = {}
    self.__FIRST = {}
    self.__follow = {}
    self.__FOLLOW = {}
def run(self):
    gr = Grammar(self.__file_name)
    gr.readGrammarFromFile()
    while True:
        self.printMenuGrammar()
        cmd = int(input("\tChoose a command: "))
        if cmd == 1:
            print(gr.getNonTerminals())
        elif cmd == 2:
            print(gr.getTerminals())
        elif cmd == 3:
            print(gr.getStartSymbol())
        elif cmd == 4:
            print(gr.getRules())
        elif cmd == 5:
            cmd2 = input("Choose non-terminal: ")
            print(gr.getProductionsForAGivenNonTerminal(cmd2))
        elif cmd == 6:
            print(gr.getFA())
        elif cmd == 7:
            if gr.isRegular():
                print("\nThe grammar is regular\n")
            else:
                print("\nThe grammar is not regular\n")
        elif cmd == 0:
            return
        else:
            print("Choose a valid command: ")
def main():
    rule_num = 1
    f = open("LSystemsCode.txt", "r", encoding='utf-8')
    lines = f.readlines()
    f.close()
    grammar = Grammar(lines)
    for rule in grammar.rules:
        key, value = rule.split("->")
        SYSTEM_RULES[key.strip()] = value.strip()
        rule_num += 1
    axiom = grammar.axiom
    iterations = grammar.steps
    model = derivation(axiom, iterations)
    segment_length = 5
    alpha_zero = 90
    angle = grammar.angle
    r_turtle = set_turtle(alpha_zero)
    turtle_screen = turtle.Screen()
    turtle_screen.screensize(1500, 1500)
    draw_l_system(r_turtle, model[-1], segment_length, angle, grammar.alfabet)
    ts = turtle.getscreen().getcanvas()
    canvasvg.saveall("LSystem.svg", ts, None, 10, None)
    turtle_screen.exitonclick()
def getInfo(self):
    # Make a list of the productions
    productionsList = [y for y in (x.strip() for x in self.productions.toPlainText().splitlines()) if y]
    productionsDict = {}
    # Convert the productions to a dictionary:
    # the key is the LHS, the value is the list of alternatives
    for production in productionsList:
        parts = production.replace(" ", "").split("->")
        productionsDict[parts[0]] = parts[1].split("|")

    NT = []
    T = []
    # Collect the non-terminals
    for i in productionsDict:
        NT.append(i)
    # Collect the terminals
    for i in productionsDict:
        for j in productionsDict[i]:
            for k in j:
                if k not in NT:
                    T.append(k)
    # Remove duplicates from non-terminals and terminals
    NT = list(set(NT))
    T = list(set(T))

    # Initialize the Grammar class
    grammar = Grammar(productionsDict, NT, T)
    # If the given grammar is left recursive
    if grammar.leftRecursive():
        self.LR.setText("Left Recursive")
        # Emit a signal to go to the next window
        self.switch_window.emit(grammar)
    else:
        self.LR.setText("Not Left Recursive")
def click(self):
    gram = self.textEdit.toPlainText().split('\n')
    sentence = self.lineEdit.text()
    grammar = Grammar()
    grammar.insert_from_arr(gram)
    if grammar.have_left_recursion():
        grammar.eliminating_left_recursion()
    if grammar.have_left_factor():
        grammar.left_factoring()
    follow = grammar.follow()
    if not grammar.is_LL1():
        print('The grammar is not LL(1)')
        return
    table = grammar.consuct_predictive_parsing_table(follow)
    step = grammar.predictive_parsing(table, sentence)
    for i in range(len(step)):
        self.tableWidget.insertRow(self.tableWidget.rowCount())
    for i in range(self.tableWidget.rowCount()):
        for j in range(self.tableWidget.columnCount()):
            self.tableWidget.setItem(i, j, QTableWidgetItem(step[i][j]))

    # Write the parsing steps to an Excel file
    from pyexcelerate import Workbook
    data = step  # data is a 2D array
    wb = Workbook()
    wb.new_sheet("step", data=data)
    wb.save("output.xlsx")
def traslateThaiHmong_Thread(self, allSentence):
    usegrammar = Grammar()
    data = backend.Database.Database()
    try:
        allSentence = str(allSentence)
        getSentence = allSentence.split("\n")
        s_sentence = []
        wordlist = []
        # Translate every sentence in parallel
        pool = mp.Pool(processes=3)
        wordlist = pool.map(usegrammar.grammarHmong, getSentence)
        print(wordlist)
        return wordlist
    except Exception as e:
        print(e)
        print("in method traslateThaiHmong")
        return s_sentence
def fit(line1: str, line2: str, line3: str, line4: str) -> (str, Grammar):
    alphabet = set(line1)
    separator = line2
    if separator == "":
        separator = Rule.rule_separator
    rules = [Rule(rule, separator) for rule in line3.split()]
    grammar = Grammar(rules, alphabet)
    return line4, grammar
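# A minimal usage sketch for fit() (not part of the original code), assuming
# the four arguments are the raw lines of an input file: the alphabet
# characters, the rule separator (empty string means use Rule.rule_separator),
# the whitespace-separated rules, and the word that is returned alongside the
# grammar. The concrete rule syntax here is an assumption.
word, grammar = fit("ab", "->", "S->aS S->b", "abb")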
def testParser_no_follow(self):
    P = Parser()
    G = Grammar()
    G.readGrammar('gram1.txt')
    P.setGrammar(G)
    self.assertTrue(P.parse("01"))
    self.assertFalse(P.parse("0"))
    self.assertTrue(P.parse("0011"))
def value_grammar(file, name):
    grammar = Grammar(name)
    iClass = IntermiddleClass()
    lines = file.read().split("\n")
    non_terminals = []
    terminals = []
    productions = []
    iNT = lines[0][0]

    for line in lines:
        arrayLine = line.split('>')
        non_terminal = arrayLine[0]
        non_terminals.append(non_terminal)
        produceds = arrayLine[1]
        if produceds != 'epsilon':
            terminalOne = produceds[0]
            if terminalOne == terminalOne.lower():
                terminals.append(terminalOne)
            if len(produceds) > 1:
                terminalTwo = produceds[2]
                if terminalTwo == terminalTwo.lower():
                    terminals.append(terminalTwo)
        productions.append(line)

    for nt in set(non_terminals):
        grammar.setNonTerminals(nt)
    for t in set(terminals):
        grammar.setTerminals(t)
    grammar.setInitialNT(iNT)
    for prod in productions:
        grammar.setProductions(prod)

    string = input('Enter the string to evaluate: ')
    if grammar.onlyEvaluate(string):
        print("----------------------Valid string----------------------")
    else:
        print("----------------------Invalid string----------------------")
    print("\n----------------------Expanded grammar----------------------")
    print(grammar.evaluateString(string))
    print("\n----------------------Transformed into a DFA path----------------------")
    print(iClass.transformAFD(grammar, string))
    file.close()
    wait_for("Press enter to continue", "\n")
    os.system('clear')
def __init__(self, grammarFileName, givenParserOutputName):
    self.currentState = 'q'
    self.index = 0
    self.workingStack = []
    self.grammar = Grammar(grammarFileName)
    self.parserOutput = ParserOutput(givenParserOutputName, self.grammar)
    self.debug = True
    self.inputStack = []
    self.epsilonCount = 0
    self.derivationsString = ""
def __init__(self):
    self.gram = Grammar()
    self.look_ahead = 1
    self.stack = ["$"]  # list used as a stack; append pushes
    self._non_terms = set()
    self._terms = set("$")
    self.parsetable = self.resetParsetable()  # dict of {non_term: {term: (first, follow)}}
    self.firstsets = {}
    self.followsets = {}
def __init__(self, grammar=Grammar()):
    self.Grammar = grammar
    self.operator_table = pd.DataFrame(data="",
                                       index=self.Grammar.terminals,
                                       columns=self.Grammar.terminals)
    self.FIRSTVT = pd.DataFrame(data=0,
                                index=self.Grammar.nonterminals,
                                columns=self.Grammar.terminals)
    self.LASTVT = pd.DataFrame(data=0,
                               index=self.Grammar.nonterminals,
                               columns=self.Grammar.terminals)
def create_grammar(not_terminals, right_sides):
    # TODO: remove terminal functions
    production_list = []
    for i, x in enumerate(not_terminals):
        prod = Production(not_terminals[i], right_sides[i])
        production_list.append(prod)
    global __this_grammar
    __this_grammar = Grammar(production_list)
    __set_NTs_range(production_list)
    print(i)
def getInfo(self):
    # Make a list of the productions
    productionsList = [
        y for y in (x.strip() for x in self.productions.toPlainText().splitlines()) if y
    ]
    productionsDict = {}
    # Convert the productions to a dictionary:
    # the key is the LHS, the value is the list of alternatives
    for production in productionsList:
        parts = production.replace(" ", "").split("->")
        productionsDict[parts[0]] = parts[1].split("|")

    # Get the start symbol
    startSymbol = self.start.text()

    NT = []
    T = []
    first = {}
    follow = {}
    # Collect the non-terminals and create a first/follow set for each one
    for i in productionsDict:
        NT.append(i)
        first[i] = set()
        follow[i] = set()
    # Collect the terminals
    for i in productionsDict:
        for j in productionsDict[i]:
            for k in j:
                if k not in NT:
                    T.append(k)
                    first[k] = set(k)
                    follow[k] = set(k)
    # Remove duplicates from non-terminals and terminals
    NT = list(set(NT))
    T = list(set(T))

    # Initialize the Grammar class
    grammar = Grammar(productionsDict, startSymbol, NT, T, first, follow)
    # Show the terminals and non-terminals in the text fields
    self.terminals.setText(str(grammar.T))
    self.nonTerminals.setText(str(grammar.NT))
    # Compute the first sets of the non-terminals
    grammar.getFirst()
    # Compute the follow sets of the non-terminals
    grammar.getFollow()
    # Create the table of first and follow
    self.creatingTables(grammar)
def main():
    # TODO 1: read the grammar from a file; it must contain no left recursion
    G = Grammar()
    G.read_from_file("./data5.3_origin.txt")

    # TODO 2: build the operator-precedence analyzer
    analysis = OperatorPrecedenceAnalysis(grammar=G)
    analysis.Grammar.print_grammar()

    # TODO 3: compute the FIRSTVT sets and print the operator precedence table
    analysis.get_operator_table()
    analysis.print_operator_table()

    # TODO 4: operator-precedence reduction of the input string i+i#
    analysis.judge('i+i#')
def main():
    # TODO 1: read the grammar from a file; it must contain no left recursion
    G = Grammar()
    G.read_from_file("./data4.1_origin.txt")

    # TODO 2: build the predictive parser, load the grammar, and print it
    predictive_parser = Predictive_Parser(grammar=G)
    predictive_parser.grammar.print_grammar()

    # TODO 3: compute the First, Follow, and Select sets and check whether the grammar is LL(1)
    predictive_parser.judgeLL1()

    # TODO 4: build and print the predictive parsing table
    predictive_parser.cal_table()

    # TODO 5: run predictive parsing on the input string
    predictive_parser.judge('(a,a)#')
def main():
    global input, grammar
    g = Grammar(grammar)
    g.parse()
    gotos = GotoGenerator(g)
    gotos.generate()
    gotos.display()
    g.first_follow.display()
    parsing_table = Table(g, gotos)
    parsing_table.generate()
    lr_parser = LR_Parser(g, parsing_table, input)
    lr_parser.parse()
def main2():
    g = Grammar()
    read_grammar_from_file_2(g)
    print(g.print_grammar())

    f = compute_first2(g)
    print(f.get_index(), " idx")
    print(f.print_first())

    l = compute_follow(g, f)
    print(l.get_index(), " idx")
    print(l.print_follow())

    table = compute_parse_table2(g, f, l)
    print(table.print_table())

    if table.check_ll1():
        print("Grammar is LL(1)")
        w = ["25", "0", "32", "0", "17", "21", "0", "16", "0", "22", "14", "26"]
        # { if x > 3 { write ( yes) ; } }
        q = ["25", "6", "0", "28", "1", "25", "10", "21", "1", "22", "14", "26", "26"]
        # { while x < y { read p ; } }
        q2 = ["25", "8", "0", "28", "0", "25", "9", "0", "14", "26", "26"]
        # { int : x ; x = 5 ; if x > 3 { write ( yes) ; } }
        q3 = ["25", "4", "33", "0", "14", "0", "32", "1", "14", "6", "0", "28",
              "1", "25", "10", "21", "1", "22", "14", "26", "26"]
        print("PRODUCTION STRING: ")
        parse_sequence2(q3, table, g.get_start_symbol())
    else:
        print("Grammar is not LL(1)...")
def main():
    G = Grammar(grammar)
    if G.grammarType[0] != 3:
        print("Not a regular grammar!\n")
        return
    FSM = FiniteStateMachine(G, "K", ["V"])
    drawFSM(FSM, "FSM_initial")
    FSM.removeUnreachableStates()
    FSM.determinate()
    drawFSM(FSM, "FSM_determinated")
    FSM.printRules()
def __init__(self, grammar_file_path='resource/grammar.txt', lexer=None, debug=False):
    if lexer is None:
        lexer = Lexer()
    self.lexer = lexer
    self.G = Grammar(grammar_file_path)
    self.states = []  # set of states of the SLR automaton
    self.action = []
    self.goto = []
    self.__terminals = list(self.G.terminals)
    self.__non_terminals = list(self.G.non_terminals)
    self.__init_states__()
    self.__init_analysis_table__()
    if debug:
        self.print_states()
        self.save_info_to_excel()
def main():
    pars_arg()
    args = parser.parse_args()
    if args.fileInput == args.fileOutput:
        print("ERROR: you must give different names for the files", file=sys.stderr)
        return 1
    raw_grammar = ""
    try:
        with open(args.fileInput, "r") as file:
            raw_grammar = file.read()
    except OSError:
        print("ERROR: cannot open " + args.fileInput, file=sys.stderr)
        return 1
    g = Grammar(raw_grammar)
    g.parse()
    gotos = GotoGenerator(g)
    gotos.generate()
    gotos.display(args.verbose)
    gotos.create_file(args.fileOutput, args.verbose)
    return 0