# Assumes module-level imports elsewhere in this file: ply.lex as lex, ply.yacc as yacc,
# and the project's compile_lexer, compile_syntax and preparser modules.
def __init__(self, script_text, method_registry, logger, conf_paths=None):
    self.method_registry = method_registry
    self.logger = logger
    self.conf_paths = conf_paths
    if not self.conf_paths:
        self.conf_paths = []
    self.node_map = {}
    self.conf_map = {}
    self.__conf_gen_list__ = []
    # to record the last loading time stamp for each conf file
    self.conf_update_ts = {}
    self.__load_confs__()

    lexer = lex.lex(module=compile_lexer)
    syntax = yacc.yacc(module=compile_syntax)
    syntax.__define_node__ = self.__define_node__
    if logger:
        syntax.__logger__ = logger

    # Feed the raw script through the preparser one character at a time.
    parser = preparser.Parser()
    for c in script_text:
        parser.feed(c)
    parser.close()

    # logger may be None, so guard the diagnostic before parsing the result.
    if self.logger:
        self.logger.warning('final shuvi script[%s]' % parser.get_final_script())
    syntax.parse(parser.get_final_script(), lexer=lexer)
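# Hedged, hypothetical stand-in (not the project's preparser module): an object with the
# same feed()/close()/get_final_script() interface used above, shown only to illustrate
# the character-by-character feeding pattern. This toy version just drops '#' line comments.
class CommentStrippingPreparser:
    def __init__(self):
        self._chars = []
        self._in_comment = False

    def feed(self, c):
        if c == '#':
            self._in_comment = True
        elif c == '\n':
            self._in_comment = False
        if not self._in_comment:
            self._chars.append(c)

    def close(self):
        pass  # nothing to flush in this toy version

    def get_final_script(self):
        return ''.join(self._chars)


if __name__ == '__main__':
    pre = CommentStrippingPreparser()
    for ch in "a = 1  # comment\nb = 2\n":
        pre.feed(ch)
    pre.close()
    print(pre.get_final_script())  # -> "a = 1  \nb = 2\n"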
def Run():
    # Read everything typed into the Tk Text widget.
    data = main_text.get(1.0, tkinter.END)

    # Tokenize first so lexical errors surface before parsing.
    lexer = lex.lex(optimize=1)
    lexer.input(data)
    while True:
        tok = lexer.token()
        if not tok:
            break
        # print(tok)

    # Then build the parser and parse the same source text.
    parser = yacc.yacc()
    result = parser.parse(data)
    pop_up.showinfo("Done", "Program ran with no errors")
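# Hedged sketch (widget and handler names are assumptions, not from the original file):
# the Tk wiring a handler like Run() above relies on -- a Text widget read with
# get("1.0", END) and the messagebox module aliased as `pop_up`.
import tkinter
from tkinter import messagebox as pop_up

root = tkinter.Tk()
main_text = tkinter.Text(root, width=80, height=25)
main_text.pack()


def run_demo():
    data = main_text.get("1.0", tkinter.END)
    # In the original, the lex/yacc analysis of `data` happens here before reporting success.
    pop_up.showinfo("Done", "Program ran with no errors")


tkinter.Button(root, text="Run", command=run_demo).pack()
root.mainloop()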
def __init__(self):
    self.lex = lex.lex(object=self)
    self.line_head_pos = 0
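# Hedged sketch (class and token names are illustrative assumptions): a minimal
# class-based PLY lexer showing what lex.lex(object=self) expects -- a `tokens`
# attribute and t_* rules defined on the instance, as in the fragment above.
import ply.lex as lex


class NumberLexer:
    tokens = ('NUMBER', 'PLUS')

    t_PLUS = r'\+'
    t_ignore = ' \t'

    def t_NUMBER(self, t):
        r'\d+'
        t.value = int(t.value)
        return t

    def t_newline(self, t):
        r'\n+'
        t.lexer.lineno += len(t.value)

    def t_error(self, t):
        print("Illegal character %r" % t.value[0])
        t.lexer.skip(1)

    def __init__(self):
        # As in the snippet above: bind the rules on this object to a lexer.
        self.lex = lex.lex(object=self)
        self.line_head_pos = 0


if __name__ == '__main__':
    lexer = NumberLexer()
    lexer.lex.input("1 + 22\n")
    for tok in lexer.lex:
        print(tok)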
# Assumes module-level `import sys`, `import ply.lex as lex`, `import ply.yacc as yacc`,
# plus the project's `lexer_lex` token module and `symtab` symbol table.
def p_empty(p):
    """
    empty :
    """
    pass


def p_error(p):
    # p is None when the error is at end of input.
    if p is None:
        print("syntax error at EOF")
    else:
        print("syntax error", p.lineno)


if __name__ == "__main__":
    lexer = lex.lex(module=lexer_lex)
    parser = yacc.yacc(debug=True)

    with open(sys.argv[1], "rt") as f:
        data = f.read()

    lexer.input(data)
    result = parser.parse(data)
    print(result)
    print()
    print()
    print()
    print("Printing SYMBOL_TABLE")
    print(symtab)
    print()
    print()
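# Hedged addition: the `empty` production above is the standard PLY idiom for optional
# pieces of grammar. A rule elsewhere in the module can use it like this (the rule name
# and SEMICOLON token here are illustrative assumptions, not the original grammar's):
def p_opt_semicolon(p):
    '''opt_semicolon : SEMICOLON
                     | empty'''
    pass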
# Ignored characters
t_ignore = " \t"


def t_newline(t):
    r'\n+'
    t.lexer.lineno += t.value.count("\n")


def t_error(t):
    print(f"Illegal character {t.value[0]!r}")
    t.lexer.skip(1)


# Build the lexer
lex.lex()


def p_statement_expr(p):
    'statement : expression'
    p[0] = p[1]


def p_expression_binop(p):
    '''expression : expression PLUS expression
                  | expression MINUS expression
                  | expression TIMES expression
                  | expression DIVIDE expression'''
    if p[2] == '+':
        p[0] = p[1] + p[3]
    elif p[2] == '-':
        p[0] = p[1] - p[3]
    elif p[2] == '*':
        p[0] = p[1] * p[3]
    elif p[2] == '/':
        p[0] = p[1] / p[3]
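# Hedged addition (not in the original excerpt): a grammar written as
# `expression : expression PLUS expression | ...` is ambiguous, and PLY reports
# shift/reduce conflicts unless operator precedence is declared. The conventional
# fix is a module-level precedence table like this one:
precedence = (
    ('left', 'PLUS', 'MINUS'),
    ('left', 'TIMES', 'DIVIDE'),
)
# With this table, yacc.yacc() builds without conflicts and 2 + 3 * 4 parses as 2 + (3 * 4).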
# Define a rule so we can track line numbers
def t_newline(t):
    r'\n+'
    t.lexer.lineno += len(t.value)


# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t'


# Error handling rule
def t_error(t):
    print("Illegal character '%s'" % t.value[0])
    t.lexer.skip(1)


# Build the lexer
lexer = lex.lex()

# x = input('write something\n')
# lexer.input(x)

# Test it out
# data = 'boolean int double string'
# data = '''1212True'''
# data += ''' 123.12 123 2532a23432432'yolo' '''
# data += '''+ - * / ( ) {} [] == = < > " : ; & | .'''
# for r in reserved:
#     data += ' ' + r
# data += ''' \'#jkdadjksb[] #qwewqe\''''
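# Hedged sketch (token names are assumptions, not the original project's): a
# self-contained way to exercise a PLY lexer like the one built above -- feed it
# a string with lexer.input() and iterate the tokens it produces.
import ply.lex as lex

tokens = ('NUMBER', 'ID')

t_ID = r'[A-Za-z_][A-Za-z0-9_]*'
t_ignore = ' \t'


def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)
    return t


def t_newline(t):
    r'\n+'
    t.lexer.lineno += len(t.value)


def t_error(t):
    print("Illegal character '%s'" % t.value[0])
    t.lexer.skip(1)


lexer = lex.lex()
lexer.input("boolean int 42\nanswer 7")
for tok in lexer:
    print(tok.type, tok.value, tok.lineno)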
# The block below appears to be the tail of code disabled with a triple-quoted string
# opened before this excerpt; the stray `==`, `raw_input` and Python 2 syntax live inside
# that disabled block. Spanish strings: 'Numero del test' = 'Test number',
# 'Has escogido' = 'You have chosen'.
numArchivo == raw_input('\nNumero del test: ')
for file in files:
    if file == files[int(numArchivo)-1]:
        respuesta = True
        break
print("Has escogido ", files[int(numArchivo)-1])
return files[int(numArchivo)-1]
"""

# Live code: read the test program and feed it to the lexer.
# Assumes module-level `import os`, `import codecs`, `import ply.lex as lex`.
print(str(os.getcwd()) + "/code/")
directorio = str(os.getcwd()) + "/code/"   # directory holding the test program
archivo = 'program.unimag'                 # source file to tokenize
test = directorio + archivo
print(test)

fp = codecs.open(test, "r", "utf-8")
cadena = fp.read()                         # cadena = the source text
fp.close()

analizador = lex.lex()                     # analizador = the lexer
analizador.input(cadena)

# Token-dump loop, left commented out in the original (note the Python 2 `print tok`):
"""
while True:
    tok = analizador.token()
    if not tok:
        break
    print tok
"""
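# Hedged sketch: a Python 3 equivalent of the disabled dump loop above, reusing `test`
# and `analizador` from the live code (the built-in open() with an encoding argument
# replaces codecs.open):
with open(test, "r", encoding="utf-8") as fp:
    cadena = fp.read()
analizador.input(cadena)
for tok in analizador:
    print(tok)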