Example #1
0
    t.skip = True
    return t
t_NEWLINE.__doc__ = yare.loop_('\n')

# Whitespaces
def t_WHITESPACE(t):
    # Mark the token as skipped so the lexer drops it from the output stream.
    t.skip = True
    return t
# PLY-style: the rule's regex lives in the function's __doc__.
# Pattern: a run of spaces and/or tabs.
# NOTE(review): yare.loop_ presumably means "one or more repetitions" — confirm against yare.
t_WHITESPACE.__doc__ = yare.loop_(yare.select([' ', '\t']))

# Identifiers
# Identifier: one letter, then any run of letters, digits or underscores.
_id_head = yare.select([yare.LOWERCASE, yare.UPPERCASE])
_id_tail = yare.loop(
    yare.select([
        yare.LOWERCASE,
        yare.UPPERCASE,
        yare.escape('_'),
        yare.DIGIT,
    ])
)
t_ID = yare.concat([_id_head, _id_tail])

lexer = lex.lex()

if __name__ == '__main__':
    import os.path as p
    with open(p.join(p.dirname(__file__), 'cmm_input.c'), 'r') as f:
        s = f.read()
    lexer.set_string(s)
    for token in lexer.get_next_token():
        print token
Example #2
0
    '/'
])

# Comment (C++-Style)
def t_CPPCOMMENT(t):
    # The matched comment is terminated by '\n', so bump the line counter.
    t.lexer.lineno += 1
    # NOTE(review): the comment token is returned rather than skipped —
    # confirm a later stage filters it out.
    return t

# PLY-style: the rule's regex lives in __doc__.
# Pattern: "//" followed by non-newline characters, ending at a newline.
# NOTE(review): diff(['\n']) presumably matches any character except '\n' — confirm.
t_CPPCOMMENT.__doc__ = concat([
    '/',
    '/',
    loop(diff(['\n'])),
    '\n',
])

# Newline
def t_NEWLINE(t):
    # Track the current source line for downstream error reporting.
    t.lexer.lineno += 1
    return t

# PLY-style: the rule's regex lives in __doc__ — a single literal newline.
t_NEWLINE.__doc__ = escape('\n')

scanner = lex.lex()

import os.path as p
with open(p.join(p.dirname(__file__), 'temp.c'), 'r') as f:
    s = f.read()
scanner.set_string(s)
for token in scanner.get_next_token():
    print token
Example #3
0
t_NEWLINE.__doc__ = yare.loop_('\n')


# Whitespaces
def t_WHITESPACE(t):
    # Flag the token so the lexer silently discards it.
    t.skip = True
    return t


# PLY-style: the rule's regex lives in __doc__.
# Pattern: a run of spaces and/or tabs.
# NOTE(review): yare.loop_ presumably means "one or more repetitions" — confirm against yare.
t_WHITESPACE.__doc__ = yare.loop_(yare.select([' ', '\t']))

# Identifiers
# Identifier: a letter, then any mix of letters, digits and underscores.
_id_first = yare.select([yare.LOWERCASE, yare.UPPERCASE])
_id_rest = yare.loop(yare.select([
    yare.LOWERCASE,
    yare.UPPERCASE,
    yare.escape('_'),
    yare.DIGIT,
]))
t_ID = yare.concat([_id_first, _id_rest])

lexer = lex.lex()

if __name__ == '__main__':
    import os.path as p
    with open(p.join(p.dirname(__file__), 'cmm_input.c'), 'r') as f:
        s = f.read()
    lexer.set_string(s)
    for token in lexer.get_next_token():
        print token
Example #4
0
# Comment (C++-Style)
def t_CPPCOMMENT(t):
    # The matched comment is terminated by '\n', so bump the line counter.
    t.lexer.lineno += 1
    # NOTE(review): the comment token is returned rather than skipped —
    # confirm a later stage filters it out.
    return t


# PLY-style: the rule's regex lives in __doc__.
# Pattern: "//" followed by non-newline characters, ending at a newline.
# NOTE(review): diff(['\n']) presumably matches any character except '\n' — confirm.
t_CPPCOMMENT.__doc__ = concat([
    '/',
    '/',
    loop(diff(['\n'])),
    '\n',
])


# Newline
def t_NEWLINE(t):
    # Track the current source line for downstream error reporting.
    t.lexer.lineno += 1
    return t


# PLY-style: the rule's regex lives in __doc__ — a single literal newline.
t_NEWLINE.__doc__ = escape('\n')

scanner = lex.lex()

import os.path as p
with open(p.join(p.dirname(__file__), 'temp.c'), 'r') as f:
    s = f.read()
scanner.set_string(s)
for token in scanner.get_next_token():
    print token