def parse(self, expr):
    """Parse *expr* with the module-level yacc parser, resetting collected state.

    Re-raises NineMLMathParseError with the offending expression appended so
    the caller can see what failed.
    """
    self.names = []
    self.funcs = []
    try:
        yacc.parse(expr)
    except NineMLMathParseError as e:
        # FIX: `except E, e` is Python-2-only syntax; `as` works on 2.6+ and 3.x.
        raise NineMLMathParseError(str(e) + " Expression was: '%s'" % expr)
def main( arg=sys.argv ) :
    # Now, this lexer actually takes a string; it doesn't (that I yet know)
    # read from a file. So, you can parse the file as you like, and feed it
    # to the lexer.
    # we're going to read a line at a time from stdin
    # Parses stdin line-by-line with the module-level yacc parser; the global
    # `index` counts lines consumed (presumably read by the grammar actions —
    # TODO confirm against the rule functions).
    global index
    index = 0
    line = sys.stdin.readline()
    while line :
        index += 1
        '''lex.input( line )
        line_cnt += 1
        print "\nLine #", line_cnt
        # attempt to get that first token
        tok = lex.token()
        while tok :
            print tok
            tok = lex.token()
        '''
        yacc.parse(line)
        line = sys.stdin.readline()
def test_parser(arg=sys.argv): #data = ( '2', '232', '98237492' ) #data = [ '2+4', '2-4', '2*37' ] #data.extend( [ 'x', 'foo', 'sof' ] ) #data = '''x:=3; s:=0; while x do s := s+x ; x := x-1 od''' #data = '''x := 12; # if x then # y := 13 # else # y := 0 # fi''' #data = 'if 5 then x := 13 else x:=0 fi' #data = ''' #define sum ( i ) #proc # return := 0; # while i do # return := return + i; # i := i - 1 # od #done; #x := 5; #sum( x )''' #initialize_builtins() print "Please enter the program, terminate with CTRL+D on a new line" data = "" data += sys.stdin.read() yacc.parse(data)
def run(self):
    # Read the whole corpus from stdin, normalise whitespace, parse it, then
    # report the collected Markov statistics and emit one generated sentence.
    s = sys.stdin.read()
    # Paragraph breaks become NUL separators before newlines are flattened.
    s = s.replace('\n\n', '\x00')
    s = s.replace('\x00\x00', '\x00')
    s = s.replace('\n\n', '')
    s = s.replace('\n', ' ')
    # NOTE(review): in the flattened source both arguments of this replace
    # render as a single space; presumably it collapsed double spaces — confirm.
    s = s.replace(' ', ' ')
    yacc.parse(s)
    print self.sentences
    self.markov.printout()
    print
    print "clause starters"
    # Sorted dump of clause-starter frequencies.
    keys = self.clause_starter.keys()
    keys.sort()
    for k in keys:
        v = self.clause_starter[k]
        print "\t", repr(k), v
    print
    print "para starters", self.para_starter
    print
    self.markov.prepare()
    sentence = random_sentence(self.markov, 800, starters=self.clause_starter, para_starters=self.para_starter)
    print_sentence(sentence, word_filter=self.words)
def terminal():
    # This method handles what is performed when Induce is called from the terminal or main function
    # The communication options are the key here
    # Define a dictionary: a default set of stuff to do with one keypress
    # NOTE(review): the short options -l/-r/-s and the long options
    # learningrate=/rounds= are declared but never handled in the loop below —
    # confirm whether they are consumed elsewhere.
    opts, detupler = getopt.getopt(sys.argv[1:], "nhmgl:r:s:", ["node", "hist",\
        "match", "gen", "learningrate=", "rounds="])
    global outputSettings
    for o,a in opts:
        if o in ("-n", "--node"):
            outputSettings['node'] = True
        if o in ("-h", "--hist"):
            outputSettings['hist'] = True
        if o in ("-m", "--match"):
            outputSettings['match'] = True
        if o in ("-g", "--gen"):
            outputSettings['gen'] = True
    import ply.yacc as yacc
    yacc.yacc()
    # Interactive loop: parse each non-empty line until EOF.
    while 1:
        try:
            s = read_line("> ")
        except EOFError:
            break
        if not s:
            continue
        yacc.parse(s)
def test_parser(data):
    """Parser smoke test.

    :param data: string data from either a file or text input.
    """
    yacc.parse(data)
def scan(self): global build_errors,cuadruplos,ids,temp_counter,counter,types,values,pOper,pilaO,braces,pSaltos #Lex construction import ply.lex as lex lex.lex() #Sintax construction import ply.yacc as yacc yacc.yacc() del build_errors[:] #Structure cleaning ids.dispatch() types.dispatch() values.dispatch() pOper.dispatch() pilaO.dispatch() braces.dispatch() pSaltos.dispatch() cuadruplos.clear() #Counters restart temp_counter = 0 counter = 0 #Compiling entry yacc.parse(self.entrada) #Return the build error's array or null in case there weren't any return build_errors
def run(self):
    """Running the parser.

    When self._lexeronly is set, only tokenises the input file and logs each
    token; otherwise the whole file is handed to the yacc parser. Afterwards
    the accumulated error count is reported via logging.
    """
    logging.debug("running parser with filename: [" + self._filename + "]")
    if self._lexeronly:
        logging.debug("doing *ONLY* lexical analysis, skipping syntactical analysis")
        ## debug output of lexical analysis: (FIXXME: replace with yacc parsing)
        for line in fileinput.input([self._filename]):
            logging.info(" processing line: [" + line.strip() + "]")
            ## Give the lexer some input
            lex.input(line)
            # Tokenize
            while True:
                token = lex.token()
                if not token:
                    break  # No more input
                logging.debug(str(token))
    else:
        # FIX: close the input file deterministically (was `open(...).read()`
        # with no corresponding close).
        with open(self._filename) as source:
            yacc.parse(source.read())
    ## report number of errors
    if self._numerrors > 0:
        logging.critical("-> " + str(self._numerrors) + " ERRORS found while parsing " + self._filename)
    else:
        logging.info("No errors found while parsing " + self._filename)
def test_parser(arg=sys.argv):
    """Parse an entire program read from standard input."""
    source = sys.stdin.read()
    yacc.parse(source)
def parse_webwork(expr):
    # Parse a WeBWorK expression string into a tree and normalise it.
    # The yacc grammar publishes its result through the module-global
    # `expr_tree` rather than returning it.
    global expr_tree
    parsed = handle_comma_separated_number(expr)
    if parsed is None:
        #didn't match comma_separated_number, so parse expr
        yacc.parse(expr)
        parsed = expr_tree
    return reduce_associative(parsed)
def validate(self, config, halt_on_errors=True, verbose=False):
    """Validate *config*: store the run options, clear any previous findings,
    then feed the text to the yacc parser (which records errors/comments)."""
    self.halt_on_errors = halt_on_errors
    self.verbose = verbose
    self.validation_errors = []
    self.comments = []
    yacc.parse(config)
def test_parser(arg=sys.argv):
    """Parse a hard-wired sample program (sum of 1..7 via a `sum` procedure)."""
    data = '''
define sum proc(n)
i := n;
s := 0;
while i do
s := s + i;
i := i-1
od;
return := s
end;
x := 7;
if x then
s := sum(x)
else
x := 0 - x
fi
'''
    yacc.parse(data)
def parse(self, manifest):
    """Parse an OSGi-style manifest string and return the resulting bundle AST.

    Only the dependency-related headers are fed to the yacc grammar; all other
    headers are ignored.
    """
    # FIX: identity comparison with None (was `assert manifest != None`).
    assert manifest is not None
    manifest = re.sub(r'\r', '', manifest)
    # Unfold header continuation lines (newline followed by a space).
    manifest = re.sub(r'\n ', '', manifest)
    headers = re.split(r'\n', manifest)
    self.ast = Ast()
    # Headers the grammar understands (tuple form: one startswith call).
    interesting = ('Import-Package:', 'Export-Package:', 'Require-Bundle:',
                   'Bundle-SymbolicName:', 'Fragment-Host:', 'Bundle-Version:')
    for header in headers:
        if header.startswith(interesting):
            # h4x0r: strip the 'bundle-' infix so the grammar can reuse rules.
            if header.startswith(('Require-Bundle:', 'Fragment-Host:')):
                header = re.sub(r'bundle-', '', header)
            yacc.parse(header)
    return self.ast.bundle
def __init__(self, line, **kw):
    # Build a per-subclass lexer/parser, parse *line* immediately, and compute
    # the total number of cases from any SearchSubSpace nodes the grammar
    # appended to self.searchtree.
    self.debug = kw.get('debug', 0)
    self.line = line
    self.searchtree = []
    self.numcases = 1
    # Derive unique debug/table module names per subclass so several parser
    # subclasses can coexist without clobbering each other's parsetab files.
    try:
        modname = os.path.split(os.path.splitext(__file__)[0])[1] + "_" + self.__class__.__name__
    except:
        # __file__ may be missing (e.g. frozen/interactive); fall back to a fixed stem.
        modname = "parser"+"_"+self.__class__.__name__
    self.debugfile = modname + ".dbg"
    self.tabmodule = modname + "_" + "parsetab"
    #print self.debugfile, self.tabmodule
    #self.debug = True
    # Build the lexer and parser
    lex.lex(module=self, debug=self.debug)
    yacc.yacc(module=self, debug=self.debug, debugfile=self.debugfile, tabmodule=self.tabmodule)
    yacc.parse(self.line)
    # Total search-space size is the product of the sub-space sizes.
    for s in self.searchtree:
        if isinstance(s, SearchSubSpace):
            self.numcases *= s.size
def validate(expression):
    """Return True iff *expression* parses without raising.

    Builds the parser tables first, then attempts the parse.
    """
    yacc.yacc()
    try:
        yacc.parse(expression)
        return True
    except Exception:
        # FIX: narrowed the bare `except:` so SystemExit / KeyboardInterrupt
        # still propagate instead of being reported as "invalid expression".
        return False
def parse_line(line, l_count, mnem_base):
    """Parse one source *line*, publishing the line counter and mnemonic base
    through module globals for the grammar actions to read."""
    global lines_count, gmnem_base
    lines_count = l_count
    gmnem_base = mnem_base
    lexer.input(line)
    yacc.parse(line)
def textEditorParseMe(filename):
    # Parse a file of "<filename> <num>: <num> <num> ..." lines into a dict
    # mapping the first number to the list of remaining numbers.
    tokens = ['FILENAME', 'NUMBERSEQUENCE']
    def t_FILENAME(t):
        r'[a-zA-Z_/.][a-zA-Z0-9_/.]*'
        return t
    def t_NUMBERSEQUENCE(t):
        r'[0-9 :]+'
        return t
    t_ignore = '\t: '
    def t_newline(t):
        r'\n+'
        t.lexer.lineno += t.value.count("\n")
    def t_error(t):
        print "Illegal character '%s'" % t.value[0]
        t.lexer.skip(1)
    lex.lex()
    # NOTE(review): `count` and `latency` are never used below — confirm
    # whether they are vestigial.
    count = []
    latency = []
    organized = {}
    def p_sentence(p):
        '''sentence : FILENAME NUMBERSEQUENCE'''
        # Split "key: v1 v2 ..." into key and the value list.
        tmp1 = []
        tmp = p[2].split(':')
        for x in tmp:
            x = x.strip()
            tmp1.append(x)
        organized[int(tmp1[0])] = tmp1[1].split(' ')
    def p_error(p):
        if p:
            print("Syntax error at '%s'" % p.value)
        else:
            print("Syntax error at EOF")
    yacc.yacc()
    # Parse line by line, stripping the trailing newline.
    file = open(filename, 'r')
    while file:
        line = file.readline()
        if not line :
            break
        yacc.parse(line[0:-1])
    return organized
def run(self, prompt='parser > '):
    """Interactive loop: parse each non-empty input line; stop on EOF."""
    while True:
        try:
            text = raw_input(prompt)
        except EOFError:
            return
        if text:
            yacc.parse(text)
def test_parser(arg=sys.argv): print "Please enter the program, terminate with CTRL+D on a new line" data = "" data += sys.stdin.read() yacc.parse(data)
def run(self):
    """REPL: parse each 'ninshu > ' line until EOF/interrupt, echoing the
    identifier table after every statement."""
    while True:
        try:
            line = input('ninshu > ')
        except (EOFError, KeyboardInterrupt):
            break
        yacc.parse(line)
        print(self.identifiers)
def parse1(): while 1: try: s = raw_input('calc > ') except EOFError: break if not s: continue print yacc.parse(s)
def run(self):
    """Calculator REPL (Python 2): parse each non-empty line until EOF."""
    while True:
        try:
            expr = raw_input('calc > ')
        except EOFError:
            return
        if expr:
            yacc.parse(expr)
def parsing_map(from_file, to_file):
    """Translate mapping lines from *from_file* into a packed binary array in *to_file*.

    Lines starting with '(' are parsed with yacc; each parse populates the
    module-global `oneline`, which is appended to an array.array('L'). The
    accumulated array is then dumped in binary form.
    """
    m_array = array.array('L')
    # FIX: both files are now closed deterministically via context managers
    # (the input file was opened inline in the for-statement and never closed).
    with open(from_file, 'r') as src:
        for l in src:
            if l[0] == '(':  # mapping data
                yacc.parse(l)
                m_array.extend(oneline)
    with open(to_file, 'wb') as f:
        m_array.tofile(f)
def read_prompt():
    """Prompt loop (Python 2): parse each non-empty line as unicode until EOF."""
    while True:
        try:
            entry = raw_input("$ ")
        except EOFError:
            return
        if entry:
            yacc.parse(unicode(entry))
def parse(self, text=None):
    """Parse *text*, or the contents of self.filename when no text is given.

    Returns the yacc parse result, or None when neither source is available.
    Note: when both a filename and text are supplied, the file wins
    (preserves the original precedence).
    """
    # FIX: identity comparisons with None (was `!= None` / `== None`).
    if self.filename is not None and text is None:
        with open(self.filename, 'r') as input_fo:
            file_contents = input_fo.read()
        return yacc.parse(file_contents)
    elif text is not None:
        return yacc.parse(text)
    else:
        return None
def parseFile(fileName):
    """Parse *fileName* and return a deep copy of the populated global `root`.

    The grammar actions append into `root.functions`, which is reset first.
    """
    root.functions = []
    # FIX: close the source file (was `open(...).read()` with no close).
    with open(fileName, 'r') as source:
        fileStr = source.read()
    yacc.parse(fileStr)
    # Deep-copy so later parses cannot mutate the returned tree.
    retRoot = copy.deepcopy(root)
    return retRoot
def run(self): while 1: try: s = raw_input('modelica > ') except EOFError: break if not s: continue yacc.parse(s) print self.classes
def parse_file(filename):
    """Lex and parse *filename*, resetting the module-global output buffer.

    Token definitions are taken from the gdllex module after it has been
    primed with the file.
    """
    global output
    global tokens
    output = ''
    gdllex.lex_file(filename)
    tokens = gdllex.tokens
    yacc.yacc()
    # FIX: close the file handle (was `open(filename, 'r').read()` left open).
    with open(filename, 'r') as source:
        yacc.parse(source.read())
def get_proc_name(): return proc_name def run_parser(): yacc.yacc() import sys file = open(sys.argv[1]) lines = file.readlines() file.close() strings = "" for i in lines:
def run(self, data=None):
    """Parse *data* directly when given; otherwise run an interactive
    'ninshu > ' loop, echoing the identifier table after each statement."""
    if data:
        yacc.parse(data)
        return
    while True:
        try:
            line = input('ninshu > ')
        except (EOFError, KeyboardInterrupt):
            break
        yacc.parse(line)
        print(self.identifiers)
'term : term "/" factor' p[0] = p[1] / p[3] def p_factor_number(p): "factor : NUMBER" p[0] = p[1] def p_factor_name(p): "factor : NAME" try: p[0] = names[p[1]] except LookupError: print("Undefined name '%s'" % p[1]) p[0] = 0 def p_error(p): if p: print("Syntax error at '%s'" % p.value) else: print("Syntax error at EOF") import ply.yacc as yacc yacc.yacc() inp = f.readlines() for s in inp: yacc.parse(s)
from engine.cleaner import remove_pyc_files

# Command-line driver: parse an ADL file and emit the corresponding XML file.
if __name__ == "__main__":
    # Initialize yacc
    yacc.yacc(debug=0, write_tables=0, start="program")
    # Retrieve command line arguments and options
    options = parse_options()
    # Check if the input arguments is consistent
    check_input_filename(options["input"])
    # Check if the output argument is consistent (if not correct it)
    options["output"] = check_output_filename(options["output"])
    # Retrieve the content of the ADL file
    input_file = open(options["input"], "r")
    input_file_content = input_file.read()
    input_file.close()
    # Parse the content of the ADL file
    yacc.parse(input_file_content)
    # Produce the output XML file
    write_output_file(options["output"])
    # Remove bytecode left next to the script after the run.
    dirname = os.path.dirname(sys.argv[0])
    remove_pyc_files(dirname)
def calculate(self):
    # Evaluate the user-defined result formula over the FEM result arrays and
    # store/display the outcome. The formula is parsed with a small PLY
    # grammar (femtools.tokrules) whose identifier table is primed from the
    # local numpy arrays built below.
    # Convert existing result values to numpy array
    # scalars
    P1 = np.array(self.result_obj.PrincipalMax)
    P2 = np.array(self.result_obj.PrincipalMed)
    P3 = np.array(self.result_obj.PrincipalMin)
    vM = np.array(self.result_obj.vonMises)
    Peeq = np.array(self.result_obj.Peeq)
    T = np.array(self.result_obj.Temperature)
    MF = np.array(self.result_obj.MassFlowRate)
    NP = np.array(self.result_obj.NetworkPressure)
    sxx = np.array(self.result_obj.NodeStressXX)
    syy = np.array(self.result_obj.NodeStressYY)
    szz = np.array(self.result_obj.NodeStressZZ)
    sxy = np.array(self.result_obj.NodeStressXY)
    sxz = np.array(self.result_obj.NodeStressXZ)
    syz = np.array(self.result_obj.NodeStressYZ)
    exx = np.array(self.result_obj.NodeStrainXX)
    eyy = np.array(self.result_obj.NodeStrainYY)
    ezz = np.array(self.result_obj.NodeStrainZZ)
    exy = np.array(self.result_obj.NodeStrainXY)
    exz = np.array(self.result_obj.NodeStrainXZ)
    eyz = np.array(self.result_obj.NodeStrainYZ)
    rx = np.array(self.result_obj.ReinforcementRatio_x)
    ry = np.array(self.result_obj.ReinforcementRatio_y)
    rz = np.array(self.result_obj.ReinforcementRatio_z)
    mc = np.array(self.result_obj.MohrCoulomb)
    # vectors
    dispvectors = np.array(self.result_obj.DisplacementVectors)
    x = np.array(dispvectors[:, 0])
    y = np.array(dispvectors[:, 1])
    z = np.array(dispvectors[:, 2])
    # Principal-stress components default to empty arrays when absent.
    s1x, s1y, s1z = np.array([]), np.array([]), np.array([])
    s2x, s2y, s2z = np.array([]), np.array([]), np.array([])
    s3x, s3y, s3z = np.array([]), np.array([]), np.array([])
    # If PSxVector is empty all UserDefined equation does not work
    if self.result_obj.PS1Vector:
        ps1vector = np.array(self.result_obj.PS1Vector)
        s1x = np.array(ps1vector[:, 0])
        s1y = np.array(ps1vector[:, 1])
        s1z = np.array(ps1vector[:, 2])
    if self.result_obj.PS2Vector:
        ps2vector = np.array(self.result_obj.PS2Vector)
        s2x = np.array(ps2vector[:, 0])
        s2y = np.array(ps2vector[:, 1])
        s2z = np.array(ps2vector[:, 2])
    if self.result_obj.PS3Vector:
        ps3vector = np.array(self.result_obj.PS3Vector)
        s3x = np.array(ps3vector[:, 0])
        s3y = np.array(ps3vector[:, 1])
        s3z = np.array(ps3vector[:, 2])
    # Reset the result display before recomputing.
    FreeCAD.FEM_dialog["results_type"] = "None"
    self.update()
    self.restore_result_dialog()
    userdefined_eq = self.result_widget.user_def_eq.toPlainText()  # Get equation to be used
    # https://forum.freecadweb.org/viewtopic.php?f=18&t=42425&start=10#p368774 ff
    # https://github.com/FreeCAD/FreeCAD/pull/3020
    from ply import lex
    from ply import yacc
    import femtools.tokrules as tokrules
    identifiers = [
        "x", "y", "z", "T", "vM", "Peeq", "P1", "P2", "P3",
        "sxx", "syy", "szz", "sxy", "sxz", "syz",
        "exx", "eyy", "ezz", "exy", "exz", "eyz",
        "MF", "NP", "rx", "ry", "rz", "mc",
        "s1x", "s1y", "s1z", "s2x", "s2y", "s2z", "s3x", "s3y", "s3z"
    ]
    # Expose the local arrays to the grammar under their identifier names.
    tokrules.names = {}
    for i in identifiers:
        tokrules.names[i] = locals()[i]
    lexer = lex.lex(module=tokrules)
    yacc.parse(input="UserDefinedFormula={0}".format(userdefined_eq), lexer=lexer)
    UserDefinedFormula = tokrules.names["UserDefinedFormula"].tolist()
    tokrules.names = {}
    # UserDefinedFormula = eval(userdefined_eq).tolist()
    if UserDefinedFormula:
        self.result_obj.UserDefined = UserDefinedFormula
        minm = min(UserDefinedFormula)
        maxm = max(UserDefinedFormula)
        self.update_colors_stats(UserDefinedFormula, "", minm, maxm)
def parseONP(input_str):
    """Run *input_str* through the module-level yacc parser and return the result."""
    result = yacc.parse(input_str)
    return result
def p_expr_minus(p): '''expr : expr MINUS term''' p[0] = ('-', p[1], p[3]) def p_expr_term(p): '''expr : term''' p[0] = p[1] def p_term_mul(p): '''term : term TIMES factor | term DIVIDE factor''' p[0] = ('*', p[1], p[3]) def p_term_factor(p): '''term : factor''' p[0] = p[1] def p_factor(p): '''factor : NUMBER''' p[0] = ('NUM', p[1]) yacc.yacc() data = "x = 3*4 + 5*6" # data = "x = 4 + 5" t = yacc.parse(data) print t
else: p[0] = [p[1]] def p_molecul(p): '''molecul : NUMBER_OF_ATOMS FLOAT SEP LABEL EOL atoms EOL''' p[0] = Molecul(p[1], p[2], p[4], p[6]) def p_atom(p): '''atom : SYMBOL SEP FLOAT SEP FLOAT SEP FLOAT''' p[0] = Atom(p[1], p[3], p[5], p[7]) def p_atoms(p): '''atoms : atom | atoms EOL atom''' if len(p) > 2: p[0] = p[1] + [p[3]] else: p[0] = [p[1]] def p_error(p): print("Syntax error at '%s'" % p.value) yacc.yacc() test = yacc.parse(open("total.molden").read())
def infun(s1):
    """Hand the string *s1* to the global yacc parser (result is discarded)."""
    yacc.parse(s1)
def inputfunction(s1):
    """Feed *s1* to the module-level yacc parser; side effects only."""
    yacc.parse(s1)
def scanincludes(string, inclst, curdir, incpaths):
    """Scan ctype files for #includes

    Adds and returns new includes to the supplied include list

    input: string with the file contents to scan, a include list
    string with the current working dir
    """
    # The parse result is a 3-element list:
    #   [global includes (set), local includes (set), {ifdef-name: sublist}].
    tokens = (
        "GINCLUDE",
        "LINCLUDE",
        #"BUNDLEINC",
        "IFDEF",
        "ENDIF",
    )
    states = (
        ("com", "exclusive"),  #comment
        ("ifdef", "inclusive"),
    )
    t_ANY_ignore = " \t"
    # C block comments and `#if 0` regions are skipped in the 'com' state.
    def t_begin_com(t):
        r"/\*"
        t.lexer.push_state("com")
    def t_com_end(t):
        r"\*/"
        t.lexer.pop_state()
        pass
    def t_line_com(t):
        r"//.*"
        pass
    def t_ANY_begin_if0(t):
        r"\#if[ \t]+0"
        t.lexer.push_state("com")
    def t_com_endif(t):
        r"\#endif"
        t.lexer.pop_state()
        pass
    def t_com_ifdef(t):
        r"\#ifdef"
        t.lexer.push_state("com")
    def t_IFDEF(t):
        r"\#ifdef[ \t]+[a-zA-Z_][a-zA-Z0-9_]*"
        t.value = t.value[6:].strip()  #return the ifdef name
        t.lexer.push_state("ifdef")
        return t
    def t_ifdef_ENDIF(t):
        r"\#endif"
        t.lexer.pop_state()
        return t
    def t_GINCLUDE(t):
        r"\#[ \t]*[Ii][Nn][Cc][Ll][Uu][Dd][Ee][ \t]+<.*\.h>"
        t.value = t.value[t.value.find("<"):].strip().strip("<>")
        return t
    def t_LINCLUDE(t):
        r"\#[ \t]*[Ii][Nn][Cc][Ll][Uu][Dd][Ee][ \t]+\".*\.h\""
        t.value = t.value[t.value.find('"'):].strip().strip('""')
        return t
    def t_BUNDLEINC(t):
        # Non-.h angle includes are recognised but deliberately dropped.
        r"\#[ \t]*[Ii][Nn][Cc][Ll][Uu][Dd][Ee][ \t]+<.*>"
        pass
    def t_ANY_error(t):
        #print("Illegal character '%s'" % t.value[0])
        t.lexer.skip(1)
    lexer = lex.lex()
    #lexer.input(string)
    #
    #for tok in lexer:
    #    print(tok)
    #
    #YACC stuff here
    def p_includes2(p):
        """
        includes : includes ginc
        """
        # Route the include into the local or global set.
        if islocalinc(p[2], curdir, incpaths):
            p[1][1].add(p[2])
        else:
            p[1][0].add(p[2])
        p[0] = p[1]
    def p_lincludes(p):
        """
        includes : includes linc
        """
        # Quoted includes also search the current directory.
        locincpaths = incpaths + [curdir + "/"]
        if islocalinc(p[2], curdir, locincpaths):
            p[1][1].add(p[2])
        else:
            p[1][0].add(p[2])
        p[0] = p[1]
    def p_ifdef(p):
        """
        includes : includes IFDEF includes ENDIF
                 | IFDEF includes ENDIF
        """
        if len(p) == 5:
            p[1][2] = addnewifdefs(p[1][2], {p[2]: p[3]})
            p[0] = p[1]
        else:
            ifdef = {}
            ifdef[p[1]] = p[2]
            p[0] = [set(), set(), ifdef]
    def p_ifdefempty(p):
        """
        includes : includes IFDEF ENDIF
                 | IFDEF ENDIF
        """
        # Empty ifdef blocks contribute nothing.
        if len(p) == 4:
            p[0] = p[1]
        else:
            p[0] = [set(), set(), {}]
    def p_ginc(p):
        "includes : ginc"
        globinc = set()
        globinc.add(p[1])
        if islocalinc(p[1], curdir, incpaths):
            p[0] = [set(), globinc, {}]
        else:
            p[0] = [globinc, set(), {}]
    def p_linc(p):
        "includes : linc"
        locinc = set()
        locinc.add(p[1])
        locincpaths = incpaths + [curdir + "/"]
        if islocalinc(p[1], curdir, locincpaths):
            p[0] = [set(), locinc, {}]
        else:
            p[0] = [locinc, set(), {}]
    def p_ginclude(p):
        "ginc : GINCLUDE"
        p[0] = p[1]
    def p_linclude(p):
        "linc : LINCLUDE"
        p[0] = p[1]
    def p_error(p):
        print("syntax error at '%s'" % p.type)
        pass
    yacc.yacc()
    newinclst = yacc.parse(string)
    if newinclst == None:
        #Check if the file didn't have any includes
        return (inclst)
    newinclst = addnewincludes(newinclst, inclst)
    return (newinclst)
parser.parse(data.read(), debug=False) currentFile="Template" result=parser.parse(template.read(), debug=False) outputfile=output outputfile.write(str(result)) outputfile.close() """ parser = yacc.yacc() if __name__ == "__main__": print("\n") datass = open(sys.argv[1], 'r') datass = datass.read() result = yacc.parse(datass) print(usednames) print("\n \n <><><> Parsing <><><> ") template = open(sys.argv[2], 'r') template = template.read() result = yacc.parse(template) print("result: \n \n", result) print(usednames) #if len(sys.argv)==4: #data = open(sys.argv[1], 'r') #template = open(sys.argv[2], 'r') #output = open(sys.argv[3] ,'w') #doTheJob(data,template,output) """ if __name__ == "__main__":
def scanamfile(amfile):
    """Scan automake (.am) file

    Returns ...
    """
    # Parse result shape: [ {var: [values]}, [[target, deps], ...], {cond: sub} ].
    amfile = "\n" + amfile  #Add \n so you can guess vars
    tokens = (
        "END",
        "COL",
        "EQ",
        "PEQ",
        "CVAR",
        "MVAR",
        "TEXT",
        "ENDTAB",
        "SPACE",
        "IF",
        "ELSE",
        "ENDIF",
    )
    states = (
        ("com", "exclusive"),  #comment
        ("var", "inclusive"),
        ("if", "exclusive"),
    )
    def t_begin_com(t):
        r"[ \t]*\#"
        t.lexer.begin("com")
    def t_com_other(t):
        r"[^\\\n]+"
        pass
    def t_com_lit(t):
        r"\\."
        pass
    def t_com_newline(t):
        r".*\\\n"
        t.lexer.lineno += 1
        pass
    def t_ifbegin(t):
        #ugly hack to ensure that this is at the begining of the line and keep the newline token.
        #PLY doesn't support the "^" beginning of line regexp :,(
        r"\nif"
        t.type = "END"
        t.lexer.push_state("if")
        return t
    def t_if_IF(t):
        #http://www.gnu.org/s/hello/manual/automake/Usage-of-Conditionals.html#Usage-of-Conditionals
        r"[ \t]+[^ \n\t]*"
        t.value = t.value.strip()  #take the variable to test
        t.lexer.pop_state()
        return t
    def t_ELSE(t):
        r"\nelse"
        return t
    def t_ENDIF(t):
        r"\nendif"
        return t
    def t_CVAR(t):
        #configure variable
        r"@.*?@"  #not greedy
        return t
    def t_MVAR(t):
        #makefile variable
        r"\$\(.*?\)"
        return t
    def t_com_END(t):
        r"\n"
        t.lexer.begin("INITIAL")
        t.lexer.lineno += 1
        return t
    def t_EQ(t):
        r"[ \t]*=[ \t]*"
        t.lexer.begin("var")
        t.value = t.value.strip()
        return t
    def t_PEQ(t):
        r"[ \t]*\+=[ \t]*"
        t.lexer.begin("var")
        t.value = t.value.strip()
        return t
    def t_contline(t):
        r"\\\n"
        t.lexer.lineno += 1
        pass
    def t_litteral(t):
        r"\\."
        t.value = t.value[1]  #take the literal char
        t.type = "TEXT"
        return t
    def t_COL(t):
        r"[ \t]*:[ \t]*"
        t.lexer.begin("var")
        return t
    def t_var_ENDTAB(t):
        r"[ \t]*;[ \t]*"
        return t
    def t_ENDTAB(t):
        r"[ \t]*\n\t[ \t]*"
        t.lexer.lineno += 1
        return t
    def t_var_TEXT(t):
        r"[^ #\n\t,\$@\\]+"
        return t
    def t_TEXT(t):
        r"[^ \n\t:=\$@\\]+"
        return t
    def t_END(t):
        r"[ \t]*\n"
        t.lexer.lineno += t.value.count('\n')
        t.lexer.begin('INITIAL')
        return t
    def t_var_SPACE(t):
        r"[ \t]+"
        return t
    def t_space(t):
        r"[ \t]"
        pass
    def t_var_special(t):
        r"\$[^({]"
        t.type = "TEXT"
        return t
    def t_ANY_error(t):
        print("Illegal character '%s'" % t.value[0])
        t.lexer.skip(1)
    lexer = lex.lex()
    #lexer.input(amfile)
    #for tok in lexer:
    #    print(tok)
    #YACC stuff begins here
    def p_done(p):
        "done : vars end"
        p[0] = p[1]
    def p_vars(p):
        """
        vars : vars end var
             | end var
        """
        # Merge variable dicts and conditionals; concatenate target lists.
        if len(p) == 4:
            p[1][0].update(p[3][0])
            p[1][2].update(p[3][2])
            p[0] = [p[1][0], p[1][1] + p[3][1], p[1][2]]
        else:
            p[0] = p[2]
    def p_if(p):
        """
        var : IF vars ENDIF
            | IF vars ELSE vars ENDIF
        """
        # Conditionals are stored keyed by name; the else-branch is "!" + name.
        if len(p) == 4:
            p[0] = [{}, [], {p[1]: p[2]}]
        else:
            p[0] = [{}, [], {p[1]: p[2], "!" + p[1]: p[4]}]
    def p_var(p):
        """
        var : textstr EQ textlst
            | textstr EQ
            | textstr PEQ textlst
        """
        # '=' assigns (possibly empty); '+=' is recorded as an append pair.
        if p[2] == "=":
            if len(p) == 4:
                p[0] = [{p[1]: p[3]}, [], {}]
            else:
                p[0] = [{p[1]: []}, [], {}]
        else:
            p[0] = [{}, [[p[1], p[3]]], {}]
    def p_textlst(p):
        """
        textlst : textlst spacestr textstr
                | textstr
        """
        if len(p) == 4:
            p[0] = p[1] + [p[3]]
        else:
            p[0] = [p[1]]
    def p_teststr(p):
        """
        textstr : textstr TEXT
                | textstr CVAR
                | textstr MVAR
                | TEXT
                | CVAR
                | MVAR
        """
        if len(p) == 3:
            p[0] = p[1] + p[2]
        else:
            p[0] = p[1]
    def p_space(p):
        """
        spacestr : spacestr SPACE
                 | SPACE
        """
        if len(p) == 3:
            p[0] = p[1] + p[2]
        else:
            p[0] = p[1]
    def p_end(p):
        """
        end : end END
            | END
        """
    def p_error(p):
        print("syntax error at '%s'" % p.type, p.value)
        pass
    yacc.yacc()
    variables = yacc.parse(amfile)
    return variables
def eval_expression(t):
    # Tree-walking evaluator for one parsed statement/expression tuple *t*.
    # NOTE: `eval` below is presumably the project's own recursive evaluator
    # shadowing the builtin — confirm in the enclosing module.
    ops = operators.keys()
    assigns = assignment.keys()
    # Operator / assignment nodes are dispatched to their dedicated evaluators.
    for el in t:
        if isinstance(el, str):
            if el in ops:
                return eval_op(t)
            if el in assigns:
                return eval_assignment(t)
    op = t[0]
    if op == 'block':
        eval(t[1])
        eval(t[2])
        return
    elif op == 'while':
        while eval(t[1]):
            eval(t[2])
    elif op == 'for':
        # FOR [i = 0; i < 10; i += 1] THEN block
        eval(t[1])
        while eval(t[2]):
            eval(t[4])
            eval(t[3])
    elif op == 'if':
        if eval(t[1]):
            eval(t[2])
        elif eval(t[3]):
            # elif chain handled by the 'elif' node itself; nothing to do here.
            pass
        else:
            eval(t[4])
    elif op == 'elif':
        if eval(t[1]):
            eval(t[2])
            return True
        elif len(t[3]) != 0:
            if eval(t[3]):
                return True
        return False
    elif op == 'else':
        eval(t[1])
    elif op == 'echo':
        # '__names' / '__preprocessor' are debug pseudo-targets.
        if t[1] == '__names':
            print('__names', str(names))
        elif t[1] == '__preprocessor':
            print('__preprocessor', str(preprocessor))
        else:
            print(str(eval(t[1])))
    elif op == 'preprocessor':
        preprocessor[t[1]] = t[2]
    elif op == 'declare_function':
        functions[t[1]] = (t[2], t[3])
    elif op == 'exec_function':
        if t[1] in functions:
            func = functions[t[1]]
            func_params, func_block = func[0], func[1]
            # Save any globals shadowed by the parameters so they can be restored.
            names_to_reset = {}
            i = 0
            for param in func_params:
                if param in names:
                    names_to_reset[param] = names[param]
                try:
                    names[param] = eval(t[2][i])
                except:
                    # Missing argument: bind the parameter to None.
                    names[param] = None
                finally:
                    i += 1
            res = eval(func_block)
            # Reset the context
            for param in names_to_reset:
                names[param] = names_to_reset[param]
            # NOTE(review): this also deletes parameters that were just
            # restored above, clobbering saved shadowed globals — confirm
            # whether that is intended.
            for param in func_params:
                del names[param]
            return res
        else:
            return None
    elif op == 'import':
        # Imported files are parsed line by line through the same grammar.
        with open(t[1] + '.pypy') as imported_file:
            for imported_line in imported_file:
                yacc.parse(imported_line)
def p_uminus(p): 'expression : ADD_OP expression %prec UMINUS' #p[0] = operations[p[1]](0,p[2]) p[0] = AST.OpNode(p[1], [p[2]]) def p_error(p): print("syntax error in line %d" % p.lineno) yacc.errok() def parse(program): return yacc.parse(program) yacc.yacc(outputdir='generated') if __name__ == "__main__": import sys import os prog = open(sys.argv[1]).read() result = yacc.parse(prog, debug=1) #print(result) graph = result.makegraphicaltree() name = os.path.splitext(sys.argv[1])[0] + '-ast.pdf' graph.write_pdf(name) #print("wrote ast to", name)
def parse(cls, filename):
    """Read *filename* through the class's reader and return the parse result."""
    return yacc.parse(cls.__read_input(filename))
pass def p_empty(p): 'empty : ' pass S = ' ' def p_error(p): global s if p: print('error de sintaxis en', p.value) else: print('error de sintaxis en EOF') print(s, 'no esta en el lenguaje') import ply.yacc as yacc yacc.yacc() while (1): try: s = input('> ') except EOFError: break if not s: continue t = yacc.parse(s)
def process(data):
    """Build the lexer and parser tables, then parse *data*.

    Returns the parser result (previously discarded), which is
    backward-compatible: callers that ignored the old None return are
    unaffected, and new callers can use the parse tree.
    """
    lexer = lex.lex()
    yacc.yacc()
    return yacc.parse(data)
tempVarCount[STRING] = 37500 # Main if __name__ == '__main__': # Check for file if (len(sys.argv) > 1): file = sys.argv[1] # Open file try: f = open(file, 'r') data = f.read() f.close() # Parse the data if (yacc.parse(data, tracking = True) == 'OK'): print(dirProc); executeVirtualMachine(funcGlobal, quadruples, constants) except EOFError: print(EOFError) else: print('File missing') while 1: try: s = raw_input('') except EOFError: break if not s: continue yacc.parse(s)
#!/usr/bin/env python3 from ply import yacc import parser if __name__ == '__main__': code = input('> ') try: ast = yacc.parse(code) except SyntaxError as syntaxE: print('SyntaxError: {}'.format(syntaxE)) exit() try: ast.a_interp({}) except Exception as e: print('Error: {}'.format(e)) exit() try: ast.interp({}) except Exception as e: print('Error: {}'.format(e)) exit()
def parseFile(fname):
    """Parse *fname* and return it wrapped in a bfrast.File AST node.

    The raw source is cached in the module-global G_text for the grammar
    actions (and error reporting) to reference.
    """
    global G_text
    # FIX: close the file handle deterministically (was left open).
    with open(fname) as f:
        G_text = f.read()
    ast = bfrast.File(fname, yacc.parse(G_text, tracking=True))
    return ast
# TODO: add def p_error(p)

# Input testing: one 'clear' statement plus a comment line the lexer ignores.
data = '''
clear X; # Ignore this line for testing purpose
'''
lexer = lex.lex(debug=True)
lexer.input(data)
yacc.yacc(debug=True)
result = yacc.parse(data)
print(result)

#------------------------INTERPRETER-------------------------
respuesta = True break print "Has escogido \"%s\"" % files[int(numArchivo) - 1] return files[int(numArchivo) - 1] def traducir(result): graphFile = open('graphviztrhee.vz', 'w') graphFile.write(result.traducir()) graphFile.close() print "El programa traducido se guardo en \"graphviztrhee.vz\"" directorio = '/Users/sebas/Documents/Compiladores/pl0/analizador version 2/test/' archivo = buscarFicheros(directorio) test = directorio + archivo fp = codecs.open(test, "r", "utf-8") cadena = fp.read() fp.close() yacc.yacc() result = yacc.parse(cadena, debug=1) #result.imprimir(" ") #print result.traducir() traducir(result) #print result
pass def p_empty(p): ''' empty : ''' pass w = '' def p_error(p): global w if p: print("Error de sintaxis en '%s'" % p.value) print w, 'no está en el lenguaje' else: print('Error de sintaxis en EOF') print w, 'no está en el lenguaje' import ply.yacc as yacc yacc.yacc() while 1: try: w = raw_input('> ') except EOFError: break t = yacc.parse(w)
sys.exit() print(-1) sys.exit() lex.lex() yacc.yacc(start='programa') if __name__ == '__main__': if (len(sys.argv) > 1): file = sys.argv[1] try: f = open(file, 'r') data = f.read() f.close() if (yacc.parse(data, tracking=True)): print("Success") i = 0 pprint(currentProgram) for quad in currentProgram.cuadruplos: print(i.__str__() + quad.__str__()) i = i + 1 data = currentProgram.prepareData() print(json.dumps(data)) else: print("Error detected") print(-1) sys.exit() except EOFError: print(EOFError)
def p_error(p): print("Syntax error in input!") # Build the parser yacc.yacc() # while True: # try: # s = input('> ') # except EOFError: # break # if not s: continue # result = yacc.parse(s) # print(result) if __name__ == '__main__': try: arch_name = 'prueba2.txt' arch = open(arch_name, 'r') print("Nombre de archivo a leer: " + arch_name) info = arch.read() # print(info) arch.close() if (yacc.parse(info, tracking=True) == 'PROGRAM COMPILED'): print("correct syntax") else: print("syntax error") except EOFError: print(EOFError)
def scanacfile(acfile):
    """Scan a autoconfigure (.in/.ac) file.

    Returns ....
    """
    # Returns the flat list of parsed top-level items (text, funcs, vars,
    # if/case structures) produced by the grammar below.
    tokens = (
        "FUNC",
        "COMPFUNC",  #complete func
        "FUNCOPT",  #func options
        "FUNCEND",
        "VAR",
        "ECHO",
        "TEXT",
        "IF",
        "IFCOM",
        "ELIF",
        "ELSE",
        "THEN",
        "IFEND",
        "CASE",
        "CASEOPT",
        "COPTEND",  #case opt end, doesn't need to be there but SHOULD
        "CASEEND",
        "COMMA",
    )
    states = (
        ("func", "inclusive"),
        ("funcopt", "exclusive"),
        ("case", "inclusive"),
        ("if", "inclusive"),
        ("shellcom", "exclusive"),
    )
    def t_contline(t):
        r"\\\n"
        t.lexer.lineno += 1
        pass
    def t_ANY_space(t):
        r"[ \t]"
        pass
    def t_newline(t):
        r"\n"
        t.lexer.lineno += 1
        pass
    def t_shfunc(t):
        #shell func: skip its brace-balanced body via the 'shellcom' state
        r'[a-zA-Z_][a-zA-Z0-9_]*\(\)[ \t]*{'
        t.lexer.level = 1
        t.lexer.push_state("shellcom")
    def t_shellcom_text(t):
        r"[^{}]+"
    def t_shellcom_opb(t):
        r"{"
        t.lexer.level += 1
    def t_shellcom_opc(t):
        r"}"
        t.lexer.level -= 1
        if t.lexer.level == 0:
            t.lexer.pop_state()
        pass
    def t_COMPFUNC(t):
        # A call with a simple argument list, captured whole: NAME(args)
        r'[a-zA-Z_][a-zA-Z0-9_]*\([^\\[\](\),]*\)'
        values = t.value.split("(")
        t.value = [values[0], values[1][:-1]]
        return t
    def t_FUNC(t):
        r'[a-zA-Z_][a-zA-Z0-9_]*\('
        t.lexer.push_state('func')
        t.value = t.value[:-1]  #return name of func
        return t
    def t_func_funcopt(t):
        r'\['
        t.lexer.code_start = t.lexer.lexpos  # Record the starting position
        t.lexer.level = 1  # Initial level
        t.lexer.push_state('funcopt')  # Enter 'ccode' state
    # Rules for the ccode state
    def t_funcopt_newcom(t):
        r'\['
        t.lexer.level += 1
    def t_funcopt_endcom(t):
        r'\]'
        t.lexer.level -= 1
        # If closing command, return the code fragment
        if t.lexer.level == 0:
            t.value = t.lexer.lexdata[t.lexer.code_start - 1:t.lexer.lexpos]
            t.type = "FUNCOPT"
            t.lexer.lineno += t.value.count('\n')
            t.lexer.pop_state()
            return t
    def t_funcopt_opt(t):
        r"[^\\\[\]]+"
    def t_funcopt_contline(t):
        r"\\\n"
    def t_func_COMMA(t):
        r","
        return t
    def t_func_FUNCEND(t):
        r"\)"
        t.lexer.pop_state()
        return t
    def t_comment(t):
        r"(dnl|\#).*\n"
        t.lexer.lineno += t.value.count('\n')
        pass
    def t_ECHO(t):
        r"echo.*\n"
        t.lexer.lineno += t.value.count('\n')
        return t
    def t_VAR(t):
        #take var=text, var="text text", var='text text', var=`text text`
        r"[a-zA-Z_][a-zA-Z0-9_]*=(\"[^\"]*\"|\'[^\']*\'|\`[^\`]*\`|[^() \t,\n]*)+"
        t.lexer.lineno += t.value.count('\n')
        return t
    def t_IF(t):
        r"if"
        t.lexer.push_state("if")
        return t
    def t_ELIF(t):
        r"elif"
        t.lexer.push_state("if")
        return t
    def t_if_THEN(t):
        r"then"
        t.lexer.pop_state()
        return t
    def t_if_IFCOM(t):
        r"[^ \t\n]+"
        return t
    def t_ELSE(t):
        r"else"
        return t
    def t_IFEND(t):
        r"fi"
        return t
    def t_CASE(t):
        r"case.*in"
        t.lexer.push_state("case")
        return t
    def t_CASEEND(t):
        r"esac"
        t.lexer.pop_state()
        return t
    def t_case_CASEOPT(t):
        r"[^\n\t\(\)]+\)"
        return t
    def t_case_COPTEND(t):
        r";;"
        return t
    def t_literal(t):
        r"\\[^\n]"
        t.type = "TEXT"
        t.value = t.value[-1]  #return litral char
        return t
    def t_TEXT(t):
        #most likely commands like "AM_INIT_AUTOMAKE" etc.
        #Fix this so I can handle variables like the one above as that is NOT a text string
        r"([^ ;,\t\n\(\)]+|\([^() \t\n]*\))"
        return t
    def t_ANY_error(t):
        print("Illegal character '%s'" % t.value[0], t.lexer.lineno)
        t.lexer.skip(1)
    lexer = lex.lex()
    #lexer.input(acfile)
    #for tok in lexer:
    #    print(tok)
    #YACC stuff begins here
    def p_complst(p):
        """
        complst : complst text
                | complst ECHO
                | complst func
                | complst VAR
                | complst ifcomp
                | complst case
                | complst FUNCOPT
                | text
                | ECHO
                | func
                | VAR
                | ifcomp
                | case
                | FUNCOPT
        """
        if len(p) == 3:
            p[0] = p[1] + [p[2]]
        else:
            p[0] = [p[1]]
    def p_text(p):
        """
        text : text TEXT
             | TEXT
        """
        # Adjacent TEXT tokens collapse into one space-joined string.
        if len(p) == 3:
            p[0] = p[1] + " " + p[2]
        else:
            p[0] = p[1]
    def p_case(p):
        """
        case : CASE caseopt CASEEND
        """
        p[0] = [p[1]] + [p[2]]
    def p_caseopt(p):
        """
        caseopt : caseopt CASEOPT complst COPTEND
                | CASEOPT complst COPTEND
        """
        if len(p) == 5:
            p[0] = p[1] + [p[2], p[3]]
        else:
            p[0] = [p[1], p[2]]
    def p_caseopt2(p):
        """
        caseopt : caseopt CASEOPT complst
                | caseopt CASEOPT COPTEND
                | CASEOPT complst
                | CASEOPT COPTEND
        """
        # Variants where the terminating ';;' is missing or the body is empty.
        if len(p) == 4:
            if isinstance(p[3], list):
                p[0] = p[1] + [p[2], p[3]]
            else:
                p[0] = p[1] + [p[2], []]
        else:
            if isinstance(p[2], list):
                p[0] = [p[1], p[2]]
            else:
                p[0] = [p[1], []]
    def p_ifcomp(p):
        #perhaps needs elif also
        """
        ifcomp : if IFEND
        """
        p[0] = p[1]
    def p_if(p):
        """
        if : if ELSE complst
           | IF ifcom THEN complst
           | if ELIF ifcom THEN complst
        """
        if len(p) == 5:
            p[0] = [[p[1]] + [p[2]], p[4]]
        elif len(p) == 6:
            p[0] = p[1] + [[p[2]] + [p[3]], p[5]]
        else:
            p[0] = p[1] + [[p[2]], p[3]]
    def p_ifcom(p):
        """
        ifcom : ifcom IFCOM
              | IFCOM
        """
        if len(p) == 3:
            p[0] = p[1] + [p[2]]
        else:
            p[0] = [p[1]]
    def p_func(p):
        """
        func : FUNC funcopt FUNCEND
             | COMPFUNC
        """
        if len(p) == 2:
            p[0] = p[1]  #this is already ordered
        else:
            p[0] = [p[1], p[2]]
    def p_funccomma(p):
        """
        funcopt : funcopt COMMA
                | COMMA complst
                | COMMA
        """
        # Empty argument positions are represented by empty lists.
        if len(p) == 3:
            if isinstance(p[2], list):
                if len(p[2]) > 1:
                    p[0] = [[]] + [p[2]]
                else:
                    p[0] = [[]] + p[2]
            else:
                p[0] = p[1] + [[]]
        else:
            p[0] = [[]]
    def p_funcopt(p):
        """
        funcopt : funcopt COMMA complst
                | complst
        """
        if len(p) == 4:
            if len(p[3]) > 1:
                p[0] = p[1] + [p[3]]
            else:
                p[0] = p[1] + p[3]
        else:
            if len(p[1]) > 1:
                p[0] = [p[1]]
            else:
                p[0] = p[1]
    def p_error(p):
        print("syntax error at '%s'" % p.type, p.value)
        pass
    yacc.yacc()
    items = yacc.parse(acfile)
    return items
def parse(s):
    """Thin wrapper: run *s* through the module's yacc parser and return the result."""
    result = yacc.parse(s)
    return result
print("Sytax error: unexpected end of file!") precedence = ( ('left', 'ADD_OP'), ('left', 'MUL_OP'), ('right', 'UMINUS'), ) def parse(program): return yacc.parse(program) yacc.yacc(outputdir='generated') if __name__ == "__main__": import sys prog = open(sys.argv[1]).read() result = yacc.parse(prog) if result: print(result) import os graph = result.makegraphicaltree() name = os.path.splitext(sys.argv[1])[0] + '-ast.pdf' graph.write_pdf(name) print("wrote ast to", name) else: print("Parsing returned no result!")
declare number x; if(y > 2.0){ y = y + 1.0; }else{ y = y * 2.0; } return y; } begin { kread(w); if(!(x < 1.0)){ kprint(s,1.0); } if((x < 1.0) & (s == b)){ s.wordCount(); } while(x > 10.0){ x = x - 1.0; } do{ x = x / 2.0; }while(x > 20.0); } end''' yacc.parse(data)
def p_expression_number(p):
    # Literal number: pass the token value through.
    """expression : NUMBER"""
    p[0] = p[1]

def p_expression_string(p):
    # Literal string: pass through unchanged.
    """expression : STRING"""
    p[0] = p[1]

def p_expression_name(p):
    # Bare identifier: passed through as-is (resolution happens elsewhere).
    """expression : NAME"""
    p[0] = p[1]

def p_error(p):
    # Report the offending token and its line; message is user-facing French.
    if p is not None:
        print("Erreur de syntaxe à la ligne %s" % p.lineno, p)

yacc.yacc()

# Parse the source file line by line; grammar actions accumulate into the
# module-global `program`, which is then frozen and rendered as a tree.
with open('code.pypy') as file:
    for line in file:
        yacc.parse(line)

program[1] = tuple(program[1])
printTreeGraph(tuple(program))
def parse(program):
    """Parse *program* text with the module-level yacc parser; return its result."""
    return yacc.parse(program)