def parse(data):
    global prop
    prop = []  # the grammar actions fill this module-global as a side effect
    lexer = lex.lex()
    lexer.input(data)
    import lexer.ply.yacc as yacc
    parser = yacc.yacc()
    parser.parse(data, tracking=True)
    return prop

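# A minimal usage sketch (the file name is a placeholder; the p_* grammar
# rules defined alongside are what actually append entries to `prop`):
#
#     props = parse(open("grammar.txt").read())
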
def tokenize(data):
    lexer = lex.lex()
    lexer.input(data)
    res = []
    while True:
        token = lexer.token()
        if not token:
            break
        res.append((token.type, token.value))
    return res

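# Usage sketch: tokenize() returns (type, value) pairs. The input and the
# token names below are hypothetical; the real names come from the t_* rules
# defined in this module:
#
#     tokenize("x = 1")  # -> [('NAME', 'x'), ('EQUALS', '='), ('NUMBER', 1)]
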
def yaccing(data, get_errors=True):
    global erreurs, lignes
    erreurs = []  # parse errors collected by the grammar actions
    lignes = {}   # per-line annotations filled in during the parse
    lexer = lex.lex()
    lexer.input(data)
    import lexer.ply.yacc as yacc
    parser = yacc.yacc()
    parser.parse(data, tracking=True)
    return [erreurs, lignes] if get_errors else lignes

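# Usage sketch (source_text is a placeholder). The keys of `lignes` are
# zero-based line numbers stored as strings, hence the int()/str() round trip:
#
#     erreurs, lignes = yaccing(source_text)
#     for i in sorted(int(k) for k in lignes):
#         print("ligne numero %s: %s" % (i + 1, lignes[str(i)]))
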
def tokenize(data):
    lexer = lex.lex()
    lexer.input(data)
    res = []
    if data != "":
        lexPly.set_data_to_parse(data)
    while True:
        token = lexer.token()
        if not token:
            break
        if lexPly.is_function(token.value):
            # re-tag identifiers that name a function known to lexPly
            res.append(("KNOWN_FUNC", token.value))
        else:
            res.append((token.type, token.value))
    return res

def lexing(word):
    lexer = lex.lex()
    lexer.input(word)
    token = lexer.token()
    return token.type.lower() if token else ""

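# Usage sketch: lexing() classifies a single word by the type of its first
# token, lower-cased. The token names here are hypothetical:
#
#     lexing("42")  # -> "number" (if the grammar defines a NUMBER token)
#     lexing("")    # -> ""       (no token produced)
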
def build_lexer(self):
    # build the lexer from the t_* rules defined on self.callback,
    # matching Unicode input
    self.lexer = lex.lex(module=self.callback, reflags=re.UNICODE)

t_RCURLY = r'}'

def t_SECTION(t):
    r'\#\#\#(.|\n)*'
    # line number information used to ensure tracebacks refer to the correct line
    t.lineno = t.lexer.lineno
    return t

def t_COMMENT(t):
    r'//[^\n]*\n|/[*](.|\n)*?[*]/'
    # discard comments, keeping the line count accurate
    t.lexer.lineno += t.value.count('\n')

def t_NL(t):
    r'\n'
    # returning nothing discards the newline token
    t.lexer.lineno += 1

t_ignore = " \t\r"

def t_error(t):
    raise Exception("Illegal character in grammar: %r in %r"
                    % (t.value[0], t.value[:10]))

lexer = lex.lex(lextab=LEX_TAB_MODULE)
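
if __name__ == '__main__':
    # Smoke test with a hypothetical input, assuming the module's `tokens`
    # declaration covers the rules above; drives the module-level lexer via
    # PLY's standard input()/token() API, mirroring the loop in tokenize().
    lexer.input("// a comment\n}\n")
    while True:
        tok = lexer.token()
        if tok is None:
            break
        print(tok.type, tok.value, tok.lineno)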