Example #1
    def __new__(cls, lineno, type, value):
        return tuple.__new__(cls, (lineno, intern(str(type)), value))
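In the library this __new__ belongs to a Token class that subclasses tuple, with lineno, type and value as its three fields; interning the type string means equal token types share one object and can be compared cheaply. The sketch below reconstructs just enough of that context to run on Python 3; the property definitions and the usage lines are illustrative, not copied from jinja2.

from operator import itemgetter
from sys import intern


class Token(tuple):
    """Minimal sketch of an immutable (lineno, type, value) token."""
    __slots__ = ()
    lineno, type, value = (property(itemgetter(i)) for i in range(3))

    def __new__(cls, lineno, type, value):
        # intern the type string so equal token types share one object
        return tuple.__new__(cls, (lineno, intern(str(type)), value))


tok = Token(1, 'name', 'foo')
print(tok.lineno, tok.type, tok.value)  # 1 name foo
print(tok.type is intern('name'))       # True: interned strings compare by identity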
Example #2
import re

# we use the unicode identifier rule if this python version is able
# to handle unicode identifiers, otherwise the standard ASCII one.
try:
    compile('föö', '<unknown>', 'eval')
except SyntaxError:
    name_re = re.compile(r'\b[a-zA-Z_][a-zA-Z0-9_]*\b')
else:
    from jinja2 import _stringdefs
    name_re = re.compile(r'[%s][%s]*' % (_stringdefs.xid_start,
                                         _stringdefs.xid_continue))

float_re = re.compile(r'(?<!\.)\d+\.\d+')
newline_re = re.compile(r'(\r\n|\r|\n)')

# intern the tokens and keep references to them
# ('intern' is a builtin on Python 2; on Python 3 it is sys.intern)
TOKEN_ADD = intern('add')
TOKEN_ASSIGN = intern('assign')
TOKEN_COLON = intern('colon')
TOKEN_COMMA = intern('comma')
TOKEN_DIV = intern('div')
TOKEN_DOT = intern('dot')
TOKEN_EQ = intern('eq')
TOKEN_FLOORDIV = intern('floordiv')
TOKEN_GT = intern('gt')
TOKEN_GTEQ = intern('gteq')
TOKEN_LBRACE = intern('lbrace')
TOKEN_LBRACKET = intern('lbracket')
TOKEN_LPAREN = intern('lparen')
TOKEN_LT = intern('lt')
TOKEN_LTEQ = intern('lteq')
TOKEN_MOD = intern('mod')
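The try/except at the top is a feature probe: compiling a source string that contains the non-ASCII identifier 'föö' raises SyntaxError on interpreters without Unicode identifier support (Python 2), so the ASCII-only name pattern is used; otherwise the pattern is built from the XID character classes in jinja2._stringdefs. A quick, hypothetical illustration of the fallback pattern (not part of the library):

import re

ascii_name_re = re.compile(r'\b[a-zA-Z_][a-zA-Z0-9_]*\b')
print(ascii_name_re.findall('user.name_1 + other'))  # ['user', 'name_1', 'other']
print(ascii_name_re.findall('föö'))                  # []: only the XID-based pattern matches this name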
Example #3
import re
from sys import intern  # so the excerpt runs standalone on Python 3

try:
    # check if this Python supports Unicode identifiers
    compile('föö', '<unknown>', 'eval')
except SyntaxError:
    # no Unicode support, use ASCII identifiers
    name_re = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*')
    check_ident = False
else:
    # Unicode support, build a pattern to match valid characters, and set flag
    # to use str.isidentifier to validate during lexing
    from jinja2 import _identifier
    name_re = re.compile(r'[\w{0}]+'.format(_identifier.pattern))
    check_ident = True
    # remove the pattern from memory after building the regex
    import sys
    del sys.modules['jinja2._identifier']
    import jinja2
    del jinja2._identifier
    del _identifier

# intern the tokens and keep references to them
TOKEN_ADD = intern('add')
TOKEN_ASSIGN = intern('assign')
TOKEN_COLON = intern('colon')
TOKEN_COMMA = intern('comma')
TOKEN_DIV = intern('div')
TOKEN_DOT = intern('dot')
TOKEN_EQ = intern('eq')
TOKEN_FLOORDIV = intern('floordiv')
TOKEN_GT = intern('gt')
TOKEN_GTEQ = intern('gteq')
TOKEN_LBRACE = intern('lbrace')
TOKEN_LBRACKET = intern('lbracket')
TOKEN_LPAREN = intern('lparen')
TOKEN_LT = intern('lt')
TOKEN_LTEQ = intern('lteq')
TOKEN_MOD = intern('mod')
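The check_ident flag set above matters because the [\w...]+ pattern is deliberately broad: \w also matches digits and characters that cannot start an identifier, so, as the comment says, the matched text is re-validated with str.isidentifier() during lexing. A small hypothetical helper showing the idea (not the library's actual lexing code):

def looks_like_name(value, check_ident=True):
    # the broad regex can over-match, e.g. a name starting with a digit,
    # so the flag requests a second check via str.isidentifier()
    return value.isidentifier() if check_ident else True

print(looks_like_name('übung'))  # True: a valid Unicode identifier on Python 3
print(looks_like_name('1abc'))   # False: \w+ would match it, isidentifier() rejects it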
Example #4
import re
from sys import intern  # so the excerpt runs standalone on Python 3

try:
    # check if this Python supports Unicode identifiers
    compile("föö", "<unknown>", "eval")
except SyntaxError:
    # no Unicode support, use ASCII identifiers
    name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*")
    check_ident = False
else:
    # Unicode support, build a pattern to match valid characters, and set flag
    # to use str.isidentifier to validate during lexing
    from jinja2 import _identifier

    name_re = re.compile(r"[\w{0}]+".format(_identifier.pattern))
    check_ident = True
    # remove the pattern from memory after building the regex
    import sys

    del sys.modules["jinja2._identifier"]
    import jinja2

    del jinja2._identifier
    del _identifier

float_re = re.compile(r"(?<!\.)\d+\.\d+")
newline_re = re.compile(r"(\r\n|\r|\n)")

# intern the tokens and keep references to them
TOKEN_ADD = intern("add")
TOKEN_ASSIGN = intern("assign")
TOKEN_COLON = intern("colon")
TOKEN_COMMA = intern("comma")
TOKEN_DIV = intern("div")
TOKEN_DOT = intern("dot")
TOKEN_EQ = intern("eq")
TOKEN_FLOORDIV = intern("floordiv")
TOKEN_GT = intern("gt")
TOKEN_GTEQ = intern("gteq")
TOKEN_LBRACE = intern("lbrace")
TOKEN_LBRACKET = intern("lbracket")
TOKEN_LPAREN = intern("lparen")
TOKEN_LT = intern("lt")
TOKEN_LTEQ = intern("lteq")
TOKEN_MOD = intern("mod")
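Two details of the shared regexes are easy to miss: the (?<!\.) lookbehind keeps float_re from firing right after a dot, presumably so dotted access such as foo.1.25 is not lexed as a float, and newline_re folds the three newline conventions into one. A quick, hypothetical check:

import re

float_re = re.compile(r"(?<!\.)\d+\.\d+")
newline_re = re.compile(r"(\r\n|\r|\n)")

print(float_re.search("price * 1.25").group())    # 1.25
print(float_re.search("foo.1.25"))                # None: blocked by the lookbehind
print(repr(newline_re.sub("\n", "a\r\nb\rc\nd"))) # 'a\nb\nc\nd'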