Example #1
    def parse_primary(self):
        token = self.stream.current
        if token.type == "name":
            if token.value in ("true", "false", "True", "False"):
                node = nodes.Const(token.value in ("true", "True"),
                                   lineno=token.lineno)
            elif token.value in ("none", "None"):
                node = nodes.Const(None, lineno=token.lineno)
            else:
                node = nodes.Name(token.value, "load", lineno=token.lineno)
            next(self.stream)
        elif token.type == "string":
            next(self.stream)
            buf = [token.value]
            lineno = token.lineno
            while self.stream.current.type == "string":
                buf.append(self.stream.current.value)
                next(self.stream)
            node = nodes.Const("".join(buf), lineno=lineno)
        elif token.type in ("integer", "float"):
            next(self.stream)
            node = nodes.Const(token.value, lineno=token.lineno)
        elif token.type == "lparen":
            next(self.stream)
            node = self.parse_tuple(explicit_parentheses=True)
            self.stream.expect("rparen")
        elif token.type == "lbracket":
            node = self.parse_list()
        elif token.type == "lbrace":
            node = self.parse_dict()
        else:
            self.fail("unexpected '%s'" % describe_token(token), token.lineno)
        return node
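For reference, the string branch above is why Jinja2 folds adjacent string literals into one constant. A minimal sketch of observing this through the public API, assuming Jinja2 is installed:

from jinja2 import Environment

# parse_primary's string branch concatenates consecutive string tokens,
# so the AST holds a single Const node with value 'foobar'.
ast = Environment().parse("{{ 'foo' 'bar' }}")
print(ast)
# roughly: Template(body=[Output(nodes=[Const(value='foobar')])])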
Example #2
    def filter_stream(self, stream):
        verbose_depth = 0
        first = True
        while 1:
            if (stream.current.type == 'block_begin' and stream.look().value
                    in ('whitespace', 'endwhitespace')):
                next(stream)
                if stream.current.value == 'whitespace':
                    verbose_depth += 1
                else:
                    verbose_depth -= 1
                    if verbose_depth < 0:
                        raise self.error("Unexpected tag 'endverbose'")
                next(stream)
                if stream.current.type != 'block_end':
                    raise self.error(
                        "Unexpected token '%s', expected end of block" %
                        lexer.describe_token(stream.current), stream)
            elif verbose_depth == 0 and stream.current.type == 'data':
                # Reduce all whitespace to a single space
                token = lexer.Token(
                    stream.current.lineno, "data",
                    self.WHITESPACE.sub(" ", stream.current.value))

                # Special case to remove leading space before the doctype
                # declaration in HTML documents
                if first:
                    first = False
                    if token.value.lower().startswith(" <!doctype "):
                        token = lexer.Token(token.lineno, "data",
                                            token.value[1:])
                yield token
            else:
                yield stream.current
            next(stream)
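filter_stream is the documented jinja2.ext.Extension hook the example above overrides. A self-contained sketch of the same pattern, with a hypothetical UpperDataExtension that upper-cases data tokens, assuming Jinja2 is installed:

from jinja2 import Environment
from jinja2.ext import Extension
from jinja2.lexer import Token


class UpperDataExtension(Extension):
    # hypothetical: rewrite every data token, pass everything else through
    def filter_stream(self, stream):
        for token in stream:
            if token.type == 'data':
                yield Token(token.lineno, 'data', token.value.upper())
            else:
                yield token


env = Environment(extensions=[UpperDataExtension])
print(env.from_string('hello {{ name }}').render(name='world'))
# -> HELLO world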
Example #3
    def filter_stream(self, stream):
        compressor = Compressor()
        strip_depth = 0
        while 1:
            if stream.current.type == 'block_begin':
                if stream.look().test('name:strip') or \
                   stream.look().test('name:endstrip'):
                    stream.skip()
                    if stream.current.value == 'strip':
                        strip_depth += 1
                    else:
                        strip_depth -= 1
                        if strip_depth < 0:
                            # report the current token as the offender
                            self.fail('Unexpected tag endstrip', stream,
                                      stream.current)
                    stream.skip()
                    if stream.current.type != 'block_end':
                        self.fail('expected end of block, got %s' %
                                  describe_token(stream.current), stream,
                                  stream.current)
                    stream.skip()
            if strip_depth > 0 and stream.current.type == 'data':
                token = stream.current
                try:
                    value = compressor.compress(token.value)
                except CompressError as e:
                    self.fail(str(e), stream, token)
                else:
                    yield Token(stream.current.lineno, 'data', value)
            else:
                yield stream.current
            next(stream)
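Compressor and CompressError are specific to this project. As a stand-in, the sketch below shows the contract the data branch relies on, one data-token string in, one compressed string out; the whitespace rule is an assumption for illustration:

import re


class WhitespaceCompressor:
    # stand-in for the project's Compressor: collapse runs of whitespace
    def compress(self, value):
        return re.sub(r'\s+', ' ', value)


print(WhitespaceCompressor().compress('lots   of\n   space'))
# -> lots of space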
Example #4
    def filter_stream(self, stream):
        ctx = StreamProcessContext(stream)
        strip_depth = 0
        while True:
            if stream.current.type == 'block_begin':
                if stream.look().test('name:strip') or \
                   stream.look().test('name:endstrip'):
                    stream.skip()
                    if stream.current.value == 'strip':
                        strip_depth += 1
                    else:
                        strip_depth -= 1
                        if strip_depth < 0:
                            ctx.fail('Unexpected tag endstrip')
                    stream.skip()
                    if stream.current.type != 'block_end':
                        ctx.fail('expected end of block, got %s' %
                                 describe_token(stream.current))
                    stream.skip()
            if strip_depth > 0 and stream.current.type == 'data':
                ctx.token = stream.current
                value = self.normalize(ctx)
                yield Token(stream.current.lineno, 'data', value)
            else:
                yield stream.current
            next(stream)
Example #5
def describe_token(token):
    from jinja2 import lexer

    if token.type == lexer.TOKEN_EOF:
        return "end of statement"
    else:
        return lexer.describe_token(token)
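A quick sketch of what the EOF special case changes, using Jinja2's lexer directly (lexer.describe_token reports an EOF token as "end of template"); assumes Jinja2 is installed:

from jinja2 import lexer

eof = lexer.Token(1, lexer.TOKEN_EOF, '')
print(lexer.describe_token(eof))   # end of template
print(describe_token(eof))         # end of statement (the wrapper above)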
Example #6
    def parse_tuple(
        self,
        simplified=False,
        with_condexpr=True,
        extra_end_rules=None,
        explicit_parentheses=False,
    ):
        """Works like `parse_expression` but if multiple expressions are
        delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
        This method could also return a regular expression instead of a tuple
        if no commas where found.

        The default parsing mode is a full tuple.  If `simplified` is `True`
        only names and literals are parsed.  The `no_condexpr` parameter is
        forwarded to :meth:`parse_expression`.

        Because tuples do not require delimiters and may end in a bogus comma
        an extra hint is needed that marks the end of a tuple.  For example
        for loops support tuples between `for` and `in`.  In that case the
        `extra_end_rules` is set to ``['name:in']``.

        `explicit_parentheses` is true if the parsing was triggered by an
        expression in parentheses.  This is used to figure out if an empty
        tuple is a valid expression or not.
        """
        lineno = self.stream.current.lineno
        if simplified:
            parse = self.parse_primary
        elif with_condexpr:
            parse = self.parse_expression
        else:
            parse = lambda: self.parse_expression(with_condexpr=False)
        args = []
        is_tuple = False
        while 1:
            if args:
                self.stream.expect("comma")
            if self.is_tuple_end(extra_end_rules):
                break
            args.append(parse())
            if self.stream.current.type == "comma":
                is_tuple = True
            else:
                break
            lineno = self.stream.current.lineno

        if not is_tuple:
            if args:
                return args[0]

            # if we don't have explicit parentheses, an empty tuple is
            # not a valid expression.  This would mean nothing (literally
            # nothing) in the spot of an expression would be an empty
            # tuple.
            if not explicit_parentheses:
                self.fail("Expected an expression, got '%s'" %
                          describe_token(self.stream.current))

        return nodes.Tuple(args, "load", lineno=lineno)
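A minimal sketch of the two outcomes the docstring describes, seen through the public parse API, assuming Jinja2 is installed:

from jinja2 import Environment

env = Environment()

# Comma-delimited expressions become a Tuple node with ctx='load'.
print(env.parse('{{ 1, 2 }}'))
# roughly: Template(body=[Output(nodes=[Tuple(items=[Const(value=1),
#                                                    Const(value=2)],
#                                             ctx='load')])])

# With no comma, parse_tuple returns the single expression unchanged.
print(env.parse('{{ 1 }}'))
# roughly: Template(body=[Output(nodes=[Const(value=1)])])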
Example #7
    def _processToken(self, ctx, stream, test_token):
        change = 0
        if (stream.look().test("name:%s" % test_token)
                or stream.look().test("name:end%s" % test_token)):
            stream.skip()
            if stream.current.value == test_token:
                change = 1
            else:
                change = -1
            stream.skip()
            if stream.current.type != "block_end":
                ctx.fail("expected end of block, got %s" %
                         describe_token(stream.current))
            stream.skip()
        return change
Example #8
    def parse(self, parser):
        lineno = next(parser.stream).lineno
        view_name = parser.stream.expect(lexer.TOKEN_STRING)
        view_name = nodes.Const(view_name.value, lineno=view_name.lineno)

        args = None
        kwargs = None
        as_var = None

        while parser.stream.current.type != lexer.TOKEN_BLOCK_END:
            token = parser.stream.current
            if token.test('name:as'):
                next(parser.stream)
                token = parser.stream.expect(lexer.TOKEN_NAME)
                as_var = nodes.Name(token.value, 'store', lineno=token.lineno)
                break
            if args is not None:
                args.append(self.parse_expression(parser))
            elif kwargs is not None:
                if token.type != lexer.TOKEN_NAME:
                    parser.fail(
                        "got '{}', expected name for keyword argument".format(
                            lexer.describe_token(token)),
                        lineno=token.lineno
                    )
                arg = token.value
                next(parser.stream)
                parser.stream.expect(lexer.TOKEN_ASSIGN)
                token = parser.stream.current
                kwargs[arg] = self.parse_expression(parser)
            else:
                if parser.stream.look().type == lexer.TOKEN_ASSIGN:
                    kwargs = {}
                else:
                    args = []
                continue

        if args is None:
            args = []
        args.insert(0, view_name)

        if kwargs is not None:
            kwargs = [nodes.Keyword(key, val) for key, val in kwargs.items()]

        call = self.call_method('_url_reverse', args, kwargs, lineno=lineno)
        if as_var is None:
            return nodes.Output([call], lineno=lineno)
        else:
            return nodes.Assign(as_var, call, lineno=lineno)
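Read from the token handling alone, the parse method above accepts three statement forms; the view names below are hypothetical and the tag's registration lives outside this example:

examples = [
    '{% url "blog:detail" slug %}',   # positional expressions collected into args
    '{% url "search" query=term %}',  # TOKEN_ASSIGN lookahead switches to kwargs
    '{% url "home" as home_url %}',   # name:as stores the call via nodes.Assign
]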