Example #1
0
    def filter_stream(self, stream):
        """
        We convert
        {{ some.variable | filter1 | filter2 }}
            to
        {{ some.variable | filter1 | filter2 | bind }}

        ... for all variable declarations in the template,
        unless the expression already ends in one of our binding
        filters (``bind``, ``inclause``, ``sqlsafe``).

        This function is called by jinja2 immediately
        after the lexing stage, but before the parser is called.
        """
        while not stream.eos:
            token = next(stream)
            if token.test("variable_begin"):
                # Collect everything up to (but excluding) 'variable_end'.
                var_expr = []
                while not token.test("variable_end"):
                    var_expr.append(token)
                    token = next(stream)
                variable_end = token

                last_token = var_expr[-1]
                # Don't bind twice.
                if (not last_token.test("name")
                        or last_token.value not in ('bind', 'inclause', 'sqlsafe')):
                    # Use the real line number so any parser error points at
                    # the right template line (was hard-coded to 10).
                    var_expr.append(Token(last_token.lineno, 'pipe', u'|'))
                    var_expr.append(Token(last_token.lineno, 'name', u'bind'))

                var_expr.append(variable_end)

                for token in var_expr:
                    yield token
            else:
                yield token
    def filter_stream(self, stream):
        """
        We convert
        {{ some.variable | filter1 | filter2 }}
            to
        {{ some.variable | filter1 | filter2 | yaml }}

        ... for all variable declarations in the template

        This function is called by jinja2 immediately
        after the lexing stage, but before the parser is called.
        """
        while not stream.eos:
            token = next(stream)
            if token.test('variable_begin'):
                # Collect everything up to (but excluding) 'variable_end'.
                var_expr = []
                while not token.test('variable_end'):
                    var_expr.append(token)
                    token = next(stream)
                variable_end = token

                last_token = var_expr[-1]
                # Don't yaml twice — but still re-emit the expression.
                # BUG FIX: a bare `continue` here used to swallow the
                # entire `{{ ... }}` expression (neither the collected
                # tokens nor 'variable_end' were ever yielded).
                if not (last_token.test('name') and last_token.value == 'yaml'):
                    var_expr.append(Token(10, 'pipe', u'|'))
                    var_expr.append(Token(10, 'name', u'yaml'))

                var_expr.append(variable_end)
                for token in var_expr:
                    yield token
            else:
                yield token
Example #3
0
    def filter_stream(self, stream: "TokenStream") -> Iterable[Token]:
        """Modify the colon to lparen and rparen tokens"""
        # Small state machine over the lexed stream, rewriting
        # `{{ a | filter: arg }}` into `{{ a | filter(arg) }}`.
        # expect a colon
        # 0: don't expect to change any {{a | filter: arg}}
        #    to {{a | filter(arg)}}
        # 1: expect a filter
        # 2: expect the colon
        # 3: expect rparen
        flag = 0

        # NOTE(review): token types are compared with `is`; this relies on
        # the TOKEN_* constants being interned singletons (true for
        # jinja2's lexer constants) — confirm if those imports change.
        for token in stream:
            # print(token.value, token.type)
            if flag == 0 and token.type is TOKEN_PIPE:
                # Saw `|` — the next name token is a filter.
                flag = 1
                yield token
            elif token.type is TOKEN_NAME and flag == 1:
                # Filter name seen; a `:` may follow, opening the arg list.
                flag = 2
                yield token
            elif token.type is TOKEN_COLON and flag == 2:
                # `filter:` — replace the colon with `(`; closed below.
                flag = 3
                yield Token(token.lineno, TOKEN_LPAREN, None)
            elif token.type is TOKEN_COLON and flag == 3:
                # A later colon inside the arg list marks a keyword arg:
                # {{ a | filter: 1, x: 2}} => {{ a | filter: 1, x=2}}
                yield Token(token.lineno, TOKEN_ASSIGN, None)
            elif (
                token.type in (TOKEN_VARIABLE_END, TOKEN_BLOCK_END, TOKEN_PIPE)
                and flag == 3
            ):
                # Expression ends (or the next filter starts): close the
                # paren we opened, then re-emit the terminator itself.
                flag = 1 if token.type is TOKEN_PIPE else 0
                yield Token(token.lineno, TOKEN_RPAREN, None)
                yield token
            else:
                yield token
Example #4
0
    def filter_stream(self, stream: TokenStream):
        """Expand dunder placeholders found in raw data tokens.

        Each REGEX_DUNDER match inside a data token is replaced by the
        token sequence for ``{{ <name> | default('<match>') }}``; the
        surrounding text is re-emitted as data tokens with line numbers
        kept in sync.  Non-data tokens pass through untouched.
        """
        for tok in stream:
            if tok.type != "data":
                yield tok
                continue

            text = tok.value
            cursor = 0
            line = tok.lineno

            while True:
                found = REGEX_DUNDER.search(text, cursor)
                if found is None:
                    break

                start = found.start()
                if start > cursor:
                    leading = text[cursor:start]
                    yield Token(line, "data", leading)
                    line += count_newlines(leading)

                # {{ <name> | default('<matched text>') }}
                for kind, value in (
                    ("variable_begin", ""),
                    ("name", found[1]),
                    ("pipe", "|"),
                    ("name", "default"),
                    ("lparen", "("),
                    ("string", found[0]),
                    ("rparen", ")"),
                    ("variable_end", ""),
                ):
                    yield Token(line, kind, value)

                cursor = found.end()

            if cursor < len(text):
                yield Token(line, "data", text[cursor:])
Example #5
0
    def filter_stream(self, stream):
        """Append ``| latex`` to variable expressions in the template.

        The filter is injected just before the closing ``}}`` (and before
        an inline ``if``), except inside ``{% trans %}`` blocks where
        filters cannot be used.  Every original token is re-emitted.
        """
        inside_trans = False
        inside_var = False
        for tok in stream:
            kind = tok.type

            # Track trans-block nesting: we must not inject filters there.
            if kind == 'block_begin':
                tag = stream.current.value
                if tag == 'trans':
                    inside_trans = True
                elif tag == 'endtrans':
                    inside_trans = False
            elif kind == 'variable_begin':
                inside_var = True

            expr_ending = (kind == 'variable_end'
                           or (kind == 'name' and tok.value == 'if'))
            if inside_var and not inside_trans and expr_ending:
                yield Token(tok.lineno, 'pipe', '|')
                yield Token(tok.lineno, 'name', 'latex')

            if kind == 'variable_end':
                inside_var = False

            # Original token
            yield tok
Example #6
0
class TokenStreamTestCase(JinjaTestCase):
    # Minimal fixture: one block_begin followed by one block_end.
    test_tokens = [
        Token(1, TOKEN_BLOCK_BEGIN, ''),
        Token(2, TOKEN_BLOCK_END, ''),
    ]

    def test_simple(self):
        stream = TokenStream(self.test_tokens, "foo", "bar")
        # Walk through both tokens, checking liveness at each step.
        for expected_type in (TOKEN_BLOCK_BEGIN, TOKEN_BLOCK_END):
            assert stream.current.type is expected_type
            assert bool(stream)
            assert not bool(stream.eos)
            next(stream)
        # Exhausted: the stream reports EOF and becomes falsy.
        assert stream.current.type is TOKEN_EOF
        assert not bool(stream)
        assert bool(stream.eos)

    def test_iter(self):
        stream = TokenStream(self.test_tokens, "foo", "bar")
        assert [tok.type for tok in stream] == ['block_begin', 'block_end']
Example #7
0
class TestTokenStream:
    # Minimal fixture: one block_begin followed by one block_end.
    test_tokens = [
        Token(1, TOKEN_BLOCK_BEGIN, ""),
        Token(2, TOKEN_BLOCK_END, ""),
    ]

    def test_simple(self, env):
        stream = TokenStream(self.test_tokens, "foo", "bar")
        # Walk through both tokens, checking liveness at each step.
        for expected_type in (TOKEN_BLOCK_BEGIN, TOKEN_BLOCK_END):
            assert stream.current.type is expected_type
            assert bool(stream)
            assert not bool(stream.eos)
            next(stream)
        # Exhausted: the stream reports EOF and becomes falsy.
        assert stream.current.type is TOKEN_EOF
        assert not bool(stream)
        assert bool(stream.eos)

    def test_iter(self, env):
        stream = TokenStream(self.test_tokens, "foo", "bar")
        assert [tok.type for tok in stream] == ["block_begin", "block_end"]
Example #8
0
 def _template_call(stream, function_name):
     """Emit the token sequence for ``{{ function_name() }}``."""
     line = stream.current.lineno
     yield Token(line, "variable_begin", "{{")
     yield Token(line, "name", function_name)
     yield Token(line, "lparen", "(")
     yield Token(line, "rparen", ")")
     yield Token(line, "variable_end", "}}")
Example #9
0
 def filter_stream(self, stream):
     """Wrap every variable expression and pipe it through assert_safe.

     Rewrites ``{{ x }}`` into ``{{ (x) | assert_safe }}`` by injecting
     ``(`` right after 'variable_begin' and ``) | assert_safe`` right
     before 'variable_end'.
     """
     for tok in stream:
         line = tok.lineno
         if tok.type == 'variable_end':
             yield Token(line, 'rparen', ')')
             yield Token(line, 'pipe', '|')
             yield Token(line, 'name', 'assert_safe')
         yield tok
         if tok.type == 'variable_begin':
             yield Token(line, 'lparen', '(')
Example #10
0
    def filter_stream(self, stream):
        """Inject ``| ensure_unicode`` into variable expressions,
        skipping ``{% trans %}`` blocks where filters are unavailable."""
        # The token stream looks like this:
        # ------------------------
        # variable_begin {{
        # name           event
        # dot            .
        # name           getTitle
        # lparen         (
        # rparen         )
        # pipe           |
        # name           safe
        # variable_end   }}
        # ------------------------
        # Intercepting the end of the actual variable is hard but it's rather easy to get the end of
        # the variable tag or the start of the first filter. As filters are optional we need to check
        # both cases. If we inject the code before the first filter we *probably* don't need to run
        # it again later assuming our filters are nice and only return unicode. If that's not the
        # case we can simply remove the `variable_done` checks.
        # Due to the way Jinja works it is pretty much impossible to apply the filter to arguments
        # passed inside a {% trans foo=..., bar=... %} argument list - we have nothing to detect the
        # end of an argument as the 'comma' token might be inside a function call. So in that case#
        # people simply need to unicodify the strings manually. :(

        # variable_done: True once we injected before the first filter,
        # so we don't inject a second time at 'variable_end'.
        variable_done = False
        in_trans = False
        in_variable = False
        for token in stream:
            # Check if we are inside a trans block - we cannot use filters there!
            if token.type == 'block_begin':
                # stream.current is the tag-name token following '{%'.
                block_name = stream.current.value
                if block_name == 'trans':
                    in_trans = True
                elif block_name == 'endtrans':
                    in_trans = False
            elif token.type == 'variable_begin':
                in_variable = True

            if not in_trans and in_variable:
                if token.type == 'pipe':
                    # Inject our filter call before the first filter
                    yield Token(token.lineno, 'pipe', '|')
                    yield Token(token.lineno, 'name', 'ensure_unicode')
                    variable_done = True
                elif token.type == 'variable_end' or (token.type == 'name'
                                                      and token.value == 'if'):
                    if not variable_done:
                        # Inject our filter call if we haven't injected it right after the variable
                        yield Token(token.lineno, 'pipe', '|')
                        yield Token(token.lineno, 'name', 'ensure_unicode')
                    variable_done = False

            if token.type == 'variable_end':
                in_variable = False

            # Original token
            yield token
Example #11
0
    def filter_njk_stream(self, stream):
        """Translate Nunjucks' strict equality ``===`` into ``is sameas``."""
        for tok in stream:
            # `==` immediately followed by `=` is the strict-equality
            # operator `===`.
            strict_eq = tok.test("eq:==") and stream.current.test("assign:=")
            if strict_eq:
                yield Token(tok.lineno, "name", "is")
                yield Token(tok.lineno, "name", "sameas")
                # Consume the trailing '=' we just peeked at.
                stream.skip(1)
            else:
                yield tok
Example #12
0
    def filter_stream(self, stream):
        """Prepend a ``{% DJEDI_INIT_TAG %}`` block (plus a newline) to
        the template, then pass the original stream through unchanged.
        """
        # Injected before any template content, at pseudo-line 0.
        yield from (
            Token(0, 'block_begin', '{%'),
            Token(0, 'name', DJEDI_INIT_TAG),
            Token(0, 'block_end', '%}'),
            Token(0, 'data', '\n'),
        )
        yield from stream
Example #13
0
    def filter_stream(self, stream):
        """Rewrite parenthesised gettext markers found in raw data
        tokens into ``{% trans %}...{% endtrans %}`` blocks.

        Outside an expression we scan with ``_outside_re`` for an opening
        marker; inside we scan with ``_inside_re`` and balance parens via
        ``paren_stack``.  A backslash-escaped marker is emitted literally.
        Raises TemplateSyntaxError on an unclosed expression.
        """
        paren_stack = 0

        for token in stream:
            if token.type != "data":
                yield token
                continue

            pos = 0
            lineno = token.lineno

            while 1:
                if not paren_stack:
                    match = _outside_re.search(token.value, pos)
                else:
                    match = _inside_re.search(token.value, pos)
                if match is None:
                    break
                new_pos = match.start()
                if new_pos > pos:
                    # Emit the plain text before the match, keeping the
                    # line counter in sync.
                    preval = token.value[pos:new_pos]
                    yield Token(lineno, "data", preval)
                    lineno += count_newlines(preval)
                gtok = match.group()
                if gtok[0] == "\\":
                    # Escaped marker: emit literally, minus the backslash.
                    yield Token(lineno, "data", gtok[1:])
                elif not paren_stack:
                    # Opening marker: start a trans block.
                    yield Token(lineno, "block_begin", None)
                    yield Token(lineno, "name", "trans")
                    yield Token(lineno, "block_end", None)
                    paren_stack = 1
                else:
                    if gtok == "(" or paren_stack > 1:
                        yield Token(lineno, "data", gtok)
                    # Clearer than the legacy `and/or` ternary emulation.
                    paren_stack += -1 if gtok == ")" else 1
                    if not paren_stack:
                        yield Token(lineno, "block_begin", None)
                        yield Token(lineno, "name", "endtrans")
                        yield Token(lineno, "block_end", None)
                pos = match.end()

            if pos < len(token.value):
                yield Token(lineno, "data", token.value[pos:])

        if paren_stack:
            raise TemplateSyntaxError(
                "unclosed gettext expression",
                token.lineno,
                stream.name,
                stream.filename,
            )
Example #14
0
    def filter_stream(self, stream):
        """Append the environment's custom autoescape filter to every
        variable expression, when autoescaping applies to this template.
        """
        # Enable custom autoescaping once both required environment
        # hooks are present (flattened from three nested ifs).
        if (not self.custom_autoescape_enabled
                and hasattr(self.environment, 'custom_autoescape_filter_name')
                and hasattr(self.environment, 'custom_select_autoescape')):
            self.custom_autoescape_enabled = True

        # Per-template decision, based on the template name.
        autoescape = (self.custom_autoescape_enabled
                      and self.environment.custom_select_autoescape(stream.name))

        for token in stream:
            if token.type == 'variable_end' and autoescape:
                # Inject `| <filter>` just before the closing `}}`.
                yield Token(token.lineno, 'pipe', '|')
                yield Token(token.lineno, 'name',
                            self.environment.custom_autoescape_filter_name)
            yield token
Example #15
0
    def filter_stream(self, stream):
        """Rewrite parenthesised gettext markers found in raw data
        tokens into ``{% trans %}...{% endtrans %}`` blocks.

        Raises TemplateSyntaxError on an unclosed expression.
        """
        paren_stack = 0

        for token in stream:
            # BUG FIX: was `token.type is not 'data'` — identity
            # comparison against a string literal (SyntaxWarning on
            # modern CPython and not guaranteed to hold).
            if token.type != 'data':
                yield token
                continue

            pos = 0
            lineno = token.lineno

            while 1:
                # Outside an expression look for the opening marker;
                # inside, look for parens to balance.
                if not paren_stack:
                    match = _outside_re.search(token.value, pos)
                else:
                    match = _inside_re.search(token.value, pos)
                if match is None:
                    break
                new_pos = match.start()
                if new_pos > pos:
                    preval = token.value[pos:new_pos]
                    yield Token(lineno, 'data', preval)
                    lineno += count_newlines(preval)
                gtok = match.group()
                if gtok[0] == '\\':
                    # Escaped marker: emit literally, minus the backslash.
                    yield Token(lineno, 'data', gtok[1:])
                elif not paren_stack:
                    yield Token(lineno, 'block_begin', None)
                    yield Token(lineno, 'name', 'trans')
                    yield Token(lineno, 'block_end', None)
                    paren_stack = 1
                else:
                    if gtok == '(' or paren_stack > 1:
                        yield Token(lineno, 'data', gtok)
                    # Clearer than the legacy `and/or` ternary emulation.
                    paren_stack += -1 if gtok == ')' else 1
                    if not paren_stack:
                        yield Token(lineno, 'block_begin', None)
                        yield Token(lineno, 'name', 'endtrans')
                        yield Token(lineno, 'block_end', None)
                pos = match.end()

            if pos < len(token.value):
                yield Token(lineno, 'data', token.value[pos:])

        if paren_stack:
            raise TemplateSyntaxError('unclosed gettext expression',
                                      token.lineno, stream.name,
                                      stream.filename)
Example #16
0
 def filter_stream(self, stream):
     """Normalize data tokens inside ``{% strip %}...{% endstrip %}``.

     The strip/endstrip block tags are consumed here (they never reach
     the parser); nesting depth is tracked so nested strip blocks work.
     Data tokens at depth > 0 are rewritten via ``self.normalize`` —
     the exact transformation is defined elsewhere.
     """
     ctx = StreamProcessContext(stream)
     strip_depth = 0
     while True:
         if stream.current.type == 'block_begin':
             if stream.look().test('name:strip') or \
                stream.look().test('name:endstrip'):
                 # Skip past 'block_begin' to the tag-name token.
                 stream.skip()
                 if stream.current.value == 'strip':
                     strip_depth += 1
                 else:
                     strip_depth -= 1
                     if strip_depth < 0:
                         ctx.fail('Unexpected tag endstrip')
                 stream.skip()
                 # strip/endstrip take no arguments.
                 if stream.current.type != 'block_end':
                     ctx.fail('expected end of block, got %s' %
                              describe_token(stream.current))
                 stream.skip()
         if strip_depth > 0 and stream.current.type == 'data':
             # Inside a strip block: rewrite the text payload.
             ctx.token = stream.current
             value = self.normalize(ctx)
             yield Token(stream.current.lineno, 'data', value)
         else:
             yield stream.current
         next(stream)
Example #17
0
 def filter_stream(self, stream):
     """Compress data tokens inside ``{% strip %}...{% endstrip %}``.

     The strip/endstrip block tags are consumed here (they never reach
     the parser); nesting depth is tracked so nested blocks work.
     Compression failures are reported via ``self.fail``.
     """
     compressor = Compressor()
     strip_depth = 0
     while 1:
         if stream.current.type == 'block_begin':
             if stream.look().test('name:strip') or \
                stream.look().test('name:endstrip'):
                 stream.skip()
                 if stream.current.value == 'strip':
                     strip_depth += 1
                 else:
                     strip_depth -= 1
                     if strip_depth < 0:
                         # BUG FIX: `token` was referenced here before it
                         # was ever assigned (NameError, or a stale token
                         # from a previous iteration); report against the
                         # token actually at fault.
                         self.fail('Unexpected tag endstrip', stream,
                                   stream.current)
                 stream.skip()
                 # strip/endstrip take no arguments.
                 if stream.current.type != 'block_end':
                     self.fail('expected end of block, got %s' %
                               describe_token(stream.current), stream,
                               stream.current)
                 stream.skip()
         if strip_depth > 0 and stream.current.type == 'data':
             token = stream.current
             try:
                 value = compressor.compress(token.value)
             except CompressError as e:
                 self.fail(e.message, stream, token)
             else:
                 yield Token(stream.current.lineno, 'data', value)
         else:
             yield stream.current
         next(stream)
 def filter_stream(self, stream):
     """Normalize data tokens inside ``{% strip %}...{% endstrip %}``.

     The strip/endstrip block tags are consumed here (they never reach
     the parser); nesting depth is tracked so nested strip blocks work.
     Data tokens at depth > 0 are rewritten via ``self.normalize`` —
     the exact transformation is defined elsewhere.
     """
     ctx = StreamProcessContext(stream)
     strip_depth = 0
     while True:
         if stream.current.type == "block_begin":
             if stream.look().test("name:strip") or stream.look().test(
                     "name:endstrip"):
                 # Skip past 'block_begin' to the tag-name token.
                 stream.skip()
                 if stream.current.value == "strip":
                     strip_depth += 1
                 else:
                     strip_depth -= 1
                     if strip_depth < 0:
                         ctx.fail("Unexpected tag endstrip")
                 stream.skip()
                 # strip/endstrip take no arguments.
                 if stream.current.type != "block_end":
                     ctx.fail("expected end of block, got %s" %
                              describe_token(stream.current))
                 stream.skip()
         if strip_depth > 0 and stream.current.type == "data":
             # Inside a strip block: rewrite the text payload.
             ctx.token = stream.current
             value = self.normalize(ctx)
             yield Token(stream.current.lineno, "data", value)
         else:
             yield stream.current
         next(stream)
Example #19
0
    def filter_stream(self, stream):
        """Pass the template through unchanged, then append a
        ``{% component_exports %}`` block after the last token.

        Also resets this template's component bookkeeping in the
        environment before streaming.
        """
        self.environment.epfl_compoext_data[stream.name] = {
            "component_exports": [],  # these values are accessed from
            "component_parents": {},  # epfl-page
            "component_children": {}
        }

        # Track one line past the last token so the appended block gets
        # a plausible line number.
        last_line = 0
        for tok in stream:
            yield tok
            last_line = tok.lineno + 1

        yield Token(last_line, 'block_begin', None)
        yield Token(last_line, 'name', 'component_exports')
        yield Token(last_line, 'block_end', None)
Example #20
0
    def _visit_data(stream, state):
        """Visit the current data token, injecting a ``statement()``
        template call after each completed non-blank line."""
        # Not processing: defer to the pass-along visitor unchanged.
        if not state["processing"]:
            yield from SqlvmExtension._pass_along_visitor(stream, state)
            return

        # Every newline-terminated line of the data token is examined;
        # the trailing (possibly partial) line is emitted as-is below.
        # NOTE(review): the newline separators themselves are not
        # re-emitted — presumably intended; confirm against the output
        # format expected downstream.
        lines = stream.current.value.split("\n")
        for line in lines[:-1]:
            if line and not line.isspace():
                yield Token(stream.current.lineno, "data", line)
                state["statement_started"] = True

            # Flush a pending statement marker; state carries across
            # data tokens, so this may fire on a blank line too.
            if state["statement_started"]:
                yield from SqlvmExtension._template_call(stream, "statement")
                state["statement_started"] = False

        yield Token(stream.current.lineno, "data", lines[-1])
        # Advance past the data token we just consumed.
        next(stream)
Example #21
0
    def next_if_name(self, stream):
        """Consume a run of name-like tokens (sub, name, colon, dot) and
        join their values into a single NAME token.

        Returns the synthesized Token, or None if no matching token was
        at the head of the stream.
        """
        # BUG FIX: the original condition tested TOKEN_DOT twice; the
        # duplicate was redundant and has been removed (behavior is
        # unchanged).
        name = ''
        while (stream.current.test(TOKEN_SUB)
               or stream.current.test(TOKEN_NAME)
               or stream.current.test(TOKEN_COLON)
               or stream.current.test(TOKEN_DOT)):
            name += next(stream).value

        return Token(stream.current.lineno, TOKEN_NAME, name) if name else None
 def filter_stream(self, stream):
     """Normalize every data token via the stream context; all other
     tokens pass through untouched."""
     ctx = StreamProcessContext(stream)
     for tok in stream:
         if tok.type == 'data':
             ctx.token = tok
             yield Token(tok.lineno, 'data', self.normalize(ctx))
         else:
             yield tok
Example #23
0
 def filter_stream(self, stream: Any):
     """Dedent the text of data tokens.

     The first physical line of a data token is dedented only when the
     token starts a new source line; the remainder is always dedented.
     Non-data tokens pass through unchanged.
     """
     prev_line = 0
     for tok in stream:
         if tok.type == "data":
             head, sep, tail = tok.value.partition("\n")
             # Dedent the first line only when we have moved past the
             # previously seen line number.
             if tok.lineno > prev_line:
                 head = dedent(head)
             tok = Token(tok.lineno, "data", head + sep + dedent(tail))
         yield tok
         prev_line = tok.lineno
Example #24
0
 def filter_stream(self, stream):
     """Compress the text of every data token; other tokens pass
     through.  Compression errors are reported via ``self.fail``."""
     compressor = Compressor()
     for tok in stream:
         if tok.type == 'data':
             try:
                 compressed = compressor.compress(tok.value)
             except CompressError as e:
                 self.fail(e.message, stream, tok)
             else:
                 yield Token(tok.lineno, 'data', compressed)
         else:
             yield tok
Example #25
0
    def arg_tokenizer(self, token: Token):
        """Tokenize definition arguments found in *token*'s value.

        NOTE(review): currently this only prints each DEF_ARG_RE match
        (debug leftovers writing to stdout) and yields the token
        unchanged; the real expansion below is unreachable.
        """
        for mo in DEF_ARG_RE.finditer(token.value):
            print(mo)
            print(mo['name'], mo['double'])

        yield token
        return

        # NOTE(review): everything from here down is dead code — it sits
        # after an unconditional `return`.  It looks like an unfinished
        # implementation that expands each match into either a literal
        # DEF_PREFIX data token or a `{{ name }}` variable sequence;
        # confirm intent before resurrecting or deleting.
        if not mo:
            yield token
            return
        while mo:
            if mo.lastgroup == 'double':
                yield Token(token.lineno, TOKEN_DATA, DEF_PREFIX)
            else:
                yield from (
                    Token(token.lineno, TOKEN_VARIABLE_BEGIN, "{{"),
                    Token(token.lineno, TOKEN_NAME, mo['name']),
                    Token(token.lineno, TOKEN_VARIABLE_END, "}}"),
                )
            mo = DEF_ARG_RE.search(token.value, mo.endpos)
    def filter_stream(self, stream):
        """
        We convert
        {{ some.variable | filter1 | filter2 }}
            to
        {{ (some.variable | filter1 | filter2) | yaml }}

        ... for all variable declarations in the template

        This function is called by jinja2 immediately
        after the lexing stage, but before the parser is called.
        """
        while not stream.eos:
            token = next(stream)
            if token.test("variable_begin"):
                # Collect everything up to (but excluding) 'variable_end'.
                var_expr = []
                while not token.test("variable_end"):
                    var_expr.append(token)
                    token = next(stream)
                variable_end = token

                last_token = var_expr[-1]
                if last_token.test("name") and last_token.value == "yaml":
                    # Already yaml-filtered: re-emit unchanged.
                    # BUG FIX: a bare `continue` here used to swallow the
                    # entire `{{ ... }}` expression (neither the collected
                    # tokens nor 'variable_end' were ever yielded).
                    var_expr.append(variable_end)
                else:
                    # Wrap the whole expression between the `variable_begin`
                    # and `variable_end` marks in parens:
                    var_expr.insert(1, Token(var_expr[0].lineno, "lparen", None))
                    var_expr.append(Token(var_expr[-1].lineno, "rparen", None))

                    var_expr.append(Token(token.lineno, "pipe", "|"))
                    var_expr.append(Token(token.lineno, "name", "yaml"))

                    var_expr.append(variable_end)

                for token in var_expr:
                    yield token
            else:
                yield token
Example #27
0
    def filter_stream(self, stream):
        """Normalize every data token's text; other tokens pass through.

        NOTE(review): `self.normalize` appears to write its output into
        `self.buf` as a side effect (the joined buffer is what gets
        yielded below) — confirm against the normalize implementation.
        """
        if not self.page:
            self.page = str(self.filename)
        # Remember stream identity for error reporting elsewhere.
        self.name = stream.name
        self.filename = stream.filename
        for token in stream:
            if token.type != 'data':
                yield token
                continue

            self.lineno = token.lineno
            self.normalize(token.value)
            yield Token(token.lineno, 'data', ''.join(self.buf))
Example #28
0
    def filter_stream(self, stream):
        """Minify data tokens coming out of the parent stream filter.

        Strips newlines, collapses runs of whitespace to single spaces,
        and removes whitespace between adjacent tags and before
        self-closing tag ends.  Non-data tokens pass through unchanged.
        """
        for tok in super().filter_stream(stream):
            if tok.type == 'data':
                minified = re.sub(r'\n', '', tok.value)
                minified = re.sub(r'(>)(\s+)(<)', r'\1\3', minified)
                minified = re.sub(r'\s+', r' ', minified)
                minified = re.sub(r'(")(\s+)(/>)', r'\1\3', minified)
                yield Token(tok.lineno, tok.type, minified)
            else:
                yield tok
Example #29
0
 def interpolate(self, token):
     """Expand gettext markers found inside a data token.

     Text between ``_gettext_re`` matches is yielded as data tokens;
     each match becomes the token sequence for ``{{ gettext('...') }}``.
     Line numbers are kept in sync with the consumed text.
     """
     pos = 0
     end = len(token.value)
     lineno = token.lineno
     while 1:
         match = _gettext_re.search(token.value, pos)
         if match is None:
             break
         value = token.value[pos:match.start()]
         if value:
             yield Token(lineno, "data", value)
             # BUG FIX: advance only by the newlines in the text just
             # consumed; the original counted the whole token's newlines
             # on every iteration, inflating line numbers.
             lineno += count_newlines(value)
         yield Token(lineno, "variable_begin", None)
         yield Token(lineno, "name", "gettext")
         yield Token(lineno, "lparen", None)
         yield Token(lineno, "string", match.group(1))
         yield Token(lineno, "rparen", None)
         yield Token(lineno, "variable_end", None)
         # Account for newlines inside the matched marker itself.
         lineno += count_newlines(match.group())
         pos = match.end()
     if pos < end:
         yield Token(lineno, "data", token.value[pos:])
Example #30
0
 def interpolate(self, token):
     """Expand gettext markers found inside a data token.

     Text between ``_gettext_re`` matches is yielded as data tokens;
     each match becomes the token sequence for ``{{ gettext('...') }}``.
     Line numbers are kept in sync with the consumed text.
     """
     pos = 0
     end = len(token.value)
     lineno = token.lineno
     while 1:
         match = _gettext_re.search(token.value, pos)
         if match is None:
             break
         value = token.value[pos:match.start()]
         if value:
             yield Token(lineno, 'data', value)
             # BUG FIX: advance only by the newlines in the text just
             # consumed; the original counted the whole token's newlines
             # on every iteration, inflating line numbers.
             lineno += count_newlines(value)
         yield Token(lineno, 'variable_begin', None)
         yield Token(lineno, 'name', 'gettext')
         yield Token(lineno, 'lparen', None)
         yield Token(lineno, 'string', match.group(1))
         yield Token(lineno, 'rparen', None)
         yield Token(lineno, 'variable_end', None)
         # Account for newlines inside the matched marker itself.
         lineno += count_newlines(match.group())
         pos = match.end()
     if pos < end:
         yield Token(lineno, 'data', token.value[pos:])