Example #1
0
 def generate_tokens(source_code, _=None):
     """Yield tokens from JavaScript-like source, fixing up regex literals.

     Extends the base tokenizer with a ``/.../flags`` regex-literal pattern.
     When such a "literal" immediately follows a word token, the ``/`` is
     actually the division operator, so the token is re-split into its
     ordinary subtokens.

     :param source_code: the source text to tokenize.
     :param _: unused; kept for interface compatibility with sibling readers.
     """
     # The regex-literal pattern is compiled WITHOUT a leading '|'.
     # The original compiled r"|/..." whose empty first alternative made
     # match() succeed on every token, defeating the literal check below.
     regx_regx = r"/(?:\\.|[^/])+?/[igm]*"
     regx_pattern = re.compile(regx_regx)
     word_pattern = re.compile(r'\w+')
     # The leading '|' exists only to append this alternative to the base
     # tokenizer's pattern.
     tokens = CodeReader.generate_tokens(source_code, r"|" + regx_regx)
     leading_by_word = False
     for token in tokens:
         # After a word, '/' is division, not a regex literal: re-tokenize.
         if leading_by_word and regx_pattern.match(token):
             for subtoken in CodeReader.generate_tokens(token):
                 yield subtoken
         else:
             yield token
         # Whitespace does not affect whether the next '/' starts a regex.
         if not token.isspace():
             leading_by_word = word_pattern.match(token)
Example #2
0
 def generate_tokens(source_code, _=None):
     """Tokenize source, adding triple-quoted strings as single tokens.

     :param source_code: the source text to tokenize.
     :param _: unused; kept for interface compatibility with sibling readers.
     """
     # Extra alternatives for the base tokenizer: '''...''' and """...""".
     triple_quoted = r"|\'\'\'.*?\'\'\'" + r"|\"\"\".*?\"\"\""
     return CodeReader.generate_tokens(source_code, triple_quoted)
 def generate_tokens(source_code, _=None):
     """Tokenize source, treating the listed multi-char operators as tokens.

     :param source_code: the source text to tokenize.
     :param _: unused; kept for interface compatibility with sibling readers.
     """
     # Operators and annotations that must survive as single tokens.
     extra_tokens = ('..', '->', '<@', '@>', '@lazy', '@fuzzy',
                     '@index', '@deterministic')
     # Escape each one and join into extra regex alternatives for the
     # base tokenizer (leading '|' appends to its existing pattern).
     addition = r'|' + r'|'.join(map(re.escape, extra_tokens))
     return CodeReader.generate_tokens(source_code, addition)