    def compile_nodelist(self):
        """
        Parse and compile the template source into a nodelist, passing the
        template name through to the parser instance. If debug is True and
        an exception occurs during parsing, the exception is annotated with
        contextual line information from the template source.
        """
        if self.engine.debug:
            lexer = DebugLexer(self.source)
        else:
            lexer = Lexer(self.source)

        tokens = lexer.tokenize()
        parser = Parser(
            tokens,
            self.engine.template_libraries,
            self.engine.template_builtins,
        )

        parser.template_name = self.origin.template_name

        try:
            return parser.parse()
        except Exception as e:
            if self.engine.debug:
                e.template_debug = self.get_exception_info(e, e.token)
            raise
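
For orientation, a minimal standalone sketch of the same lex-then-parse pipeline on Django 1.9+ (the inline template string and the settings stanza are illustrative, not part of the original method):

import django
from django.conf import settings

settings.configure(TEMPLATES=[{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
}])
django.setup()

from django.template import engines
from django.template.base import Lexer, Parser

engine = engines['django'].engine
tokens = Lexer('Hello {{ name }}!').tokenize()
parser = Parser(tokens, engine.template_libraries, engine.template_builtins)
nodelist = parser.parse()  # a NodeList of TextNode/VariableNode instances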
Example #2
def get(key):
    cache_key = CACHE_PREFIX + 'image' + get_language() + key
    content = cache.get(cache_key)
    if content is None:
        obj, created = Chunk.objects.get_or_create(
            key=key, defaults={'content': key})
        # A chunk's content can itself contain other chunks as variables.
        lexer = Lexer(obj.content, 0)
        content = ''.join(map(Chunk.process_token, lexer.tokenize()))
        cache.set(cache_key, content)
    return content
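
Chunk.process_token is not shown here; a plausible sketch of such a hook, assuming text tokens pass through and variable tokens name other chunks to expand recursively (hypothetical, using the same pre-2.1 token constants as the other examples on this page):

    # Inside the Chunk model (hypothetical sketch):
    @staticmethod
    def process_token(token):
        # Text passes through; a variable token is treated as a reference
        # to another chunk and expanded recursively via get().
        if token.token_type == TOKEN_TEXT:
            return token.contents
        if token.token_type == TOKEN_VAR:
            return get(token.contents.strip())
        return ''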
Example #3
    def __str__(self):
        my_lexer = Lexer(self.title, UNKNOWN_SOURCE)
        my_tokens = my_lexer.tokenize()

        # Deliberately strip off template tokens that are not text or
        # variable. Filter into a new list rather than calling remove()
        # while iterating, which would skip tokens.
        my_tokens = [my_token for my_token in my_tokens
                     if my_token.token_type in (TOKEN_TEXT, TOKEN_VAR)]

        my_parser = Parser(my_tokens)
        return my_parser.parse().render(SiteTree.get_global_context())
Example #4
def _load_all_templates(directory):
    """
    Loads all templates in a directory (recursively) and yields tuples of
    template tokens and template paths.
    """
    if os.path.exists(directory):
        for name in os.listdir(directory):
            path = os.path.join(directory, name)
            if os.path.isdir(path):
                for template in _load_all_templates(path):
                    yield template
            elif path.endswith('.html'):
                with open(path, 'rb') as fobj:
                    source = fobj.read().decode(settings.FILE_CHARSET)
                    lexer = Lexer(source, path)
                    yield lexer.tokenize(), path
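
A quick usage sketch (the directory path is illustrative):

for tokens, path in _load_all_templates('myproject/templates'):
    print('%s: %d tokens' % (path, len(tokens)))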
Example #5
def render_custom_content(body, context_data=None):
    """Renders custom content for the payload using Django templating.

    This will take the custom payload content template provided by
    the user and render it using a stripped down version of Django's
    templating system.

    In order to keep the payload safe, we use a limited Context along with a
    custom Parser that blocks certain template tags. This gives us
    tags like {% for %} and {% if %}, but blacklists tags like {% load %}
    and {% include %}.
    """
    if context_data is None:
        context_data = {}

    lexer = Lexer(body, origin=None)
    parser = CustomPayloadParser(lexer.tokenize())
    template = Template('')
    template.nodelist = parser.parse()

    return template.render(Context(context_data))
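
CustomPayloadParser is not defined in this snippet. One plausible sketch, assuming it blocks tags by pruning them from each registered library as it is added (the blocklist below is illustrative, not the project's actual list):

from django.template.base import Parser

class CustomPayloadParser(Parser):
    # Illustrative blocklist of tag names to refuse.
    BLOCKED_TAGS = {'block', 'debug', 'extends', 'include', 'load', 'ssi'}

    def add_library(self, lib):
        # Register the library normally, then drop any blocked tags so the
        # parser raises the usual "invalid block tag" error on them.
        super(CustomPayloadParser, self).add_library(lib)
        for tag_name in self.BLOCKED_TAGS:
            self.tags.pop(tag_name, None)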
Example #7
File: check.py, Project: ym68/django-cms
def _load_all_templates(directory):
    """
    Loads all templates in a directory (recursively) and yields tuples of
    template tokens and template paths.
    """
    if os.path.exists(directory):
        for name in os.listdir(directory):
            path = os.path.join(directory, name)
            if os.path.isdir(path):
                for template in _load_all_templates(path):
                    yield template
            elif path.endswith('.html'):
                with open(path, 'rb') as fobj:
                    source = fobj.read().decode(settings.FILE_CHARSET)
                    if DJANGO_1_8:
                        lexer = Lexer(source, path)
                    else:
                        lexer = Lexer(source)
                    yield lexer.tokenize(), path
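
The same version switch can be factored into a small helper (a sketch; DJANGO_1_8 is the flag already used above):

def make_lexer(source, path):
    if DJANGO_1_8:
        return Lexer(source, path)  # Django <= 1.8: Lexer(source, origin)
    return Lexer(source)            # Django >= 1.9: the origin argument is gone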
Example #8
def render_custom_content(body, context_data=None):
    """Render custom content for the payload using Django templating.

    This will take the custom payload content template provided by
    the user and render it using a stripped down version of Django's
    templating system.

    In order to keep the payload safe, we use a limited Context along with a
    custom Parser that blocks certain template tags. This gives us
    tags like ``{% for %}`` and ``{% if %}``, but blacklists tags like
    ``{% load %}`` and ``{% include %}``.

    Args:
        body (unicode):
            The template content to render.

        context_data (dict, optional):
            Context data for the template.

    Returns:
        unicode:
        The rendered template.

    Raises:
        django.template.TemplateSyntaxError:
            There was a syntax error in the template.
    """
    if context_data is None:
        context_data = {}

    template = Template('')

    if django.VERSION >= (1, 9):
        lexer = Lexer(body)
        parser_args = (template.engine.template_libraries,
                       template.engine.template_builtins, template.origin)
    else:
        lexer = Lexer(body, origin=None)
        parser_args = ()

    parser = CustomPayloadParser(lexer.tokenize(), *parser_args)
    template.nodelist = parser.parse()

    return template.render(Context(context_data))
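
Example call (the template body and context values are illustrative):

html = render_custom_content(
    '{% if user %}Hello, {{ user }}!{% endif %}',
    {'user': 'alice'})
# html == 'Hello, alice!'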
Example #9
def extract_django(fileobj, keywords, comment_tags, options):
    """Extract messages from Django template files.

    :param fileobj: the file-like object the messages should be extracted from
    :param keywords: a list of keywords (i.e. function names) that should
                     be recognized as translation functions
    :param comment_tags: a list of translator tags to search for and
                         include in the results
    :param options: a dictionary of additional options (optional)
    :return: an iterator over ``(lineno, funcname, message, comments)``
             tuples
    :rtype: ``iterator``
    """
    intrans = False
    inplural = False
    trimmed = False
    message_context = None
    singular = []
    plural = []
    lineno = 1

    encoding = options.get('encoding', 'utf8')
    text = fileobj.read().decode(encoding)

    try:
        text_lexer = Lexer(text)
    except TypeError:
        # Django 1.9 changed the way we invoke Lexer; older versions
        # require two parameters.
        text_lexer = Lexer(text, None)

    for t in text_lexer.tokenize():
        lineno += t.contents.count('\n')
        if intrans:
            if t.token_type == TOKEN_BLOCK:
                endbmatch = endblock_re.match(t.contents)
                pluralmatch = plural_re.match(t.contents)
                if endbmatch:
                    if inplural:
                        if message_context:
                            yield (
                                lineno,
                                'npgettext',
                                [
                                    smart_text(message_context),
                                    smart_text(join_tokens(singular, trimmed)),
                                    smart_text(join_tokens(plural, trimmed))
                                ],
                                [],
                            )
                        else:
                            yield (lineno, 'ngettext',
                                   (smart_text(join_tokens(singular, trimmed)),
                                    smart_text(join_tokens(plural,
                                                           trimmed))), [])
                    else:
                        if message_context:
                            yield (
                                lineno,
                                'pgettext',
                                [
                                    smart_text(message_context),
                                    smart_text(join_tokens(singular, trimmed))
                                ],
                                [],
                            )
                        else:
                            yield (lineno, None,
                                   smart_text(join_tokens(singular,
                                                          trimmed)), [])

                    intrans = False
                    inplural = False
                    message_context = None
                    singular = []
                    plural = []
                elif pluralmatch:
                    inplural = True
                else:
                    raise SyntaxError('Translation blocks must not include '
                                      'other block tags: %s' % t.contents)
            elif t.token_type == TOKEN_VAR:
                if inplural:
                    plural.append('%%(%s)s' % t.contents)
                else:
                    singular.append('%%(%s)s' % t.contents)
            elif t.token_type == TOKEN_TEXT:
                if inplural:
                    plural.append(t.contents)
                else:
                    singular.append(t.contents)
        else:
            if t.token_type == TOKEN_BLOCK:
                imatch = inline_re.match(t.contents)
                bmatch = block_re.match(t.contents)
                cmatches = constant_re.findall(t.contents)
                if imatch:
                    g = imatch.group(1)
                    g = strip_quotes(g)
                    message_context = imatch.group(3)
                    if message_context:
                        # strip quotes
                        message_context = message_context[1:-1]
                        yield (
                            lineno,
                            'pgettext',
                            [smart_text(message_context),
                             smart_text(g)],
                            [],
                        )
                        message_context = None
                    else:
                        yield lineno, None, smart_text(g), []
                elif bmatch:
                    if bmatch.group(2):
                        message_context = bmatch.group(2)[1:-1]
                    for fmatch in constant_re.findall(t.contents):
                        stripped_fmatch = strip_quotes(fmatch)
                        yield lineno, None, smart_text(stripped_fmatch), []
                    intrans = True
                    inplural = False
                    trimmed = 'trimmed' in t.split_contents()
                    singular = []
                    plural = []
                elif cmatches:
                    for cmatch in cmatches:
                        stripped_cmatch = strip_quotes(cmatch)
                        yield lineno, None, smart_text(stripped_cmatch), []
            elif t.token_type == TOKEN_VAR:
                parts = t.contents.split('|')
                cmatch = constant_re.match(parts[0])
                if cmatch:
                    stripped_cmatch = strip_quotes(cmatch.group(1))
                    yield lineno, None, smart_text(stripped_cmatch), []
                for p in parts[1:]:
                    if p.find(':_(') >= 0:
                        p1 = p.split(':', 1)[1]
                        if p1[0] == '_':
                            p1 = p1[1:]
                        if p1[0] == '(':
                            p1 = p1.strip('()')
                        p1 = strip_quotes(p1)
                        yield lineno, None, smart_text(p1), []
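
An extractor like this is normally wired into Babel through a mapping file; a minimal sketch, assuming the extractor is registered under the name "django" as in django-babel (paths are illustrative):

# babel.cfg (hypothetical)
[django: templates/**.html]
encoding = utf-8

# Then extract with:
#   pybabel extract -F babel.cfg -o messages.pot .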
Example #10
def extract_django(fileobj, keywords, comment_tags, options):
    """Extract messages from Django template files.

    Adapted from https://github.com/python-babel/django-babel/blob/master/django_babel/extract.py

    :param fileobj: the file-like object the messages should be extracted from
    :param keywords: a list of keywords (i.e. function names) that should
                     be recognized as translation functions
    :param comment_tags: a list of translator tags to search for and
                         include in the results
    :param options: a dictionary of additional options (optional)
    :return: an iterator over ``(lineno, funcname, message, comments)``
             tuples
    :rtype: ``iterator``
    """
    intrans = False
    inplural = False
    trimmed = False
    message_context = None
    singular = []
    plural = []
    lineno = 1

    encoding = options.get("encoding", "utf8")
    text = fileobj.read().decode(encoding)

    try:
        text_lexer = Lexer(text)
    except TypeError:
        # Django 1.9 changed the way we invoke Lexer; older versions
        # require two parameters.
        text_lexer = Lexer(text, None)

    for t in text_lexer.tokenize():
        lineno += t.contents.count("\n")
        if t.token_type == TOKEN_BLOCK:
            imatch = inline_re.match(t.contents)
            if imatch:
                g = imatch.group(1)
                g = strip_quotes(g)
                default_message = imatch.group(3)
                if default_message:
                    comments = [
                        COMMENT_TAG_FOR_DEFAULT_MESSAGE + ": " +
                        strip_quotes(default_message)
                    ]
                else:
                    comments = []
                comment = imatch.group(7)
                if comment:
                    comments.append(strip_quotes(comment))
                message_context = imatch.group(5)
                if message_context:
                    # strip quotes
                    message_context = message_context[1:-1]
                    yield (
                        lineno,
                        "pgettext",
                        [smart_text(message_context),
                         smart_text(g)],
                        comments,
                    )
                    message_context = None
                else:
                    yield lineno, None, smart_text(g), comments
Example #11
            <p>{% trans 'None available' %}</p>
            {% else %}
            <ul class="actionlist">
            {% for entry in admin_log %}
            <li class="{% if entry.is_addition %}addlink{% endif %}{% if entry.is_change %}changelink{% endif %}{% if entry.is_deletion %}deletelink{% endif %}">
                {% if entry.is_deletion or not entry.get_admin_url %}
                    {{ entry.object_repr }}
                {% else %}
                    <a href="{{ entry.get_admin_url }}">{{ entry.object_repr }}</a>
                {% endif %}
                <br/>
                {% if entry.content_type %}
                    <span class="mini quiet">{% filter capfirst %}{% trans entry.content_type.name %}{% endfilter %}</span>
                {% else %}
                    <span class="mini quiet">{% trans 'Unknown content' %}</span>
                {% endif %}
            </li>
            {% endfor %}
            </ul>
            {% endif %}
    </div>
</div>
{% endblock %}
"""

elapsed = 0
for i in xrange(5000):
    lexer = Lexer(template_source, None)
    lexer.tokenize()
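
Note that elapsed is initialized but never updated in this truncated snippet; the presumably intended timing loop looks something like this sketch:

import time

elapsed = 0
for i in xrange(5000):
    start = time.time()
    Lexer(template_source, None).tokenize()
    elapsed += time.time() - start
print('5000 tokenize passes: %.2fs' % elapsed)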
Example #12
            <p>{% trans 'None available' %}</p>
            {% else %}
            <ul class="actionlist">
            {% for entry in admin_log %}
            <li class="{% if entry.is_addition %}addlink{% endif %}{% if entry.is_change %}changelink{% endif %}{% if entry.is_deletion %}deletelink{% endif %}">
                {% if entry.is_deletion or not entry.get_admin_url %}
                    {{ entry.object_repr }}
                {% else %}
                    <a href="{{ entry.get_admin_url }}">{{ entry.object_repr }}</a>
                {% endif %}
                <br/>
                {% if entry.content_type %}
                    <span class="mini quiet">{% filter capfirst %}{% trans entry.content_type.name %}{% endfilter %}</span>
                {% else %}
                    <span class="mini quiet">{% trans 'Unknown content' %}</span>
                {% endif %}
            </li>
            {% endfor %}
            </ul>
            {% endif %}
    </div>
</div>
{% endblock %}
"""

elapsed = 0
for i in xrange(10000):
    lexer = Lexer(template_source, None)
    lexer.tokenize()
Example #13
    def lines(self):
        source_lines = set()

        if SHOW_PARSING:
            print("-------------- {}".format(self.filename))

        if django.VERSION >= (1, 9):
            lexer = Lexer(self.source())
        else:
            lexer = Lexer(self.source(), self.filename)
        tokens = lexer.tokenize()

        # Are we inside a comment?
        comment = False
        # Is this a template that extends another template?
        extends = False
        # Are we inside a block?
        inblock = False

        for token in tokens:
            if SHOW_PARSING:
                print(
                    "%10s %2d: %r" % (
                        TOKEN_MAPPING[token.token_type],
                        token.lineno,
                        token.contents,
                    )
                )
            if token.token_type == TOKEN_BLOCK:
                if token.contents == "endcomment":
                    comment = False
                    continue

            if comment:
                continue

            if token.token_type == TOKEN_BLOCK:
                if token.contents.startswith("endblock"):
                    inblock = False
                elif token.contents.startswith("block"):
                    inblock = True
                    if extends:
                        continue

                if extends and not inblock:
                    # In an inheriting template, ignore all tags outside of
                    # blocks.
                    continue

                if token.contents == "comment":
                    comment = True
                if token.contents.startswith("end"):
                    continue
                elif token.contents in ("else", "empty"):
                    continue
                elif token.contents.startswith("elif"):
                    # NOTE: I don't like this; I want to be able to trace elif
                    # nodes, but the Django template engine doesn't track them
                    # in a way that we can get useful information from them.
                    continue
                elif token.contents.startswith("extends"):
                    extends = True

                source_lines.add(token.lineno)

            elif token.token_type == TOKEN_VAR:
                source_lines.add(token.lineno)

            elif token.token_type == TOKEN_TEXT:
                if extends and not inblock:
                    continue
                # Text nodes often start with newlines, but we don't want to
                # consider that first line to be part of the text.
                lineno = token.lineno
                lines = token.contents.splitlines(True)
                num_lines = len(lines)
                if lines[0].isspace():
                    lineno += 1
                    num_lines -= 1
                source_lines.update(range(lineno, lineno+num_lines))

            if SHOW_PARSING:
                print("\t\t\tNow source_lines is: {!r}".format(source_lines))

        return source_lines
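
As a worked illustration of what gets recorded, consider a three-line template (line numbers refer to this toy input, not real output):

1: {% if flag %}        recorded: block tags add their own line
2:   Hello {{ name }}   recorded: text and variable tokens add their lines
3: {% endif %}          skipped: tags starting with "end" are not traceable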
Example #14
def extract_django(fileobj, keywords, comment_tags, options):
    """Extract messages from Django template files.

    :param fileobj: the file-like object the messages should be extracted from
    :param keywords: a list of keywords (i.e. function names) that should
                     be recognized as translation functions
    :param comment_tags: a list of translator tags to search for and
                         include in the results
    :param options: a dictionary of additional options (optional)
    :return: an iterator over ``(lineno, funcname, message, comments)``
             tuples
    :rtype: ``iterator``
    """
    intrans = False
    inplural = False
    trimmed = False
    message_context = None
    singular = []
    plural = []
    lineno = 1

    encoding = options.get('encoding', 'utf8')
    text = fileobj.read().decode(encoding)

    try:
        text_lexer = Lexer(text)
    except TypeError:
        # Django 1.9 changed the way we invoke Lexer; older versions
        # require two parameters.
        text_lexer = Lexer(text, None)

    for t in text_lexer.tokenize():
        lineno += t.contents.count('\n')
        if intrans:
            if t.token_type == TOKEN_BLOCK:
                endbmatch = endblock_re.match(t.contents)
                pluralmatch = plural_re.match(t.contents)
                if endbmatch:
                    if inplural:
                        if message_context:
                            yield (
                                lineno,
                                'npgettext',
                                [smart_text(message_context),
                                 smart_text(join_tokens(singular, trimmed)),
                                 smart_text(join_tokens(plural, trimmed))],
                                [],
                            )
                        else:
                            yield (
                                lineno,
                                'ngettext',
                                (smart_text(join_tokens(singular, trimmed)),
                                 smart_text(join_tokens(plural, trimmed))),
                                [])
                    else:
                        if message_context:
                            yield (
                                lineno,
                                'pgettext',
                                [smart_text(message_context),
                                 smart_text(join_tokens(singular, trimmed))],
                                [],
                            )
                        else:
                            yield (
                                lineno,
                                None,
                                smart_text(join_tokens(singular, trimmed)),
                                [])

                    intrans = False
                    inplural = False
                    message_context = None
                    singular = []
                    plural = []
                elif pluralmatch:
                    inplural = True
                else:
                    raise SyntaxError('Translation blocks must not include '
                                      'other block tags: %s' % t.contents)
            elif t.token_type == TOKEN_VAR:
                if inplural:
                    plural.append('%%(%s)s' % t.contents)
                else:
                    singular.append('%%(%s)s' % t.contents)
            elif t.token_type == TOKEN_TEXT:
                if inplural:
                    plural.append(t.contents)
                else:
                    singular.append(t.contents)
        else:
            if t.token_type == TOKEN_BLOCK:
                imatch = inline_re.match(t.contents)
                bmatch = block_re.match(t.contents)
                cmatches = constant_re.findall(t.contents)
                if imatch:
                    g = imatch.group(1)
                    g = strip_quotes(g)
                    message_context = imatch.group(3)
                    if message_context:
                        # strip quotes
                        message_context = message_context[1:-1]
                        yield (
                            lineno,
                            'pgettext',
                            [smart_text(message_context), smart_text(g)],
                            [],
                        )
                        message_context = None
                    else:
                        yield lineno, None, smart_text(g), []
                elif bmatch:
                    if bmatch.group(2):
                        message_context = bmatch.group(2)[1:-1]
                    for fmatch in constant_re.findall(t.contents):
                        stripped_fmatch = strip_quotes(fmatch)
                        yield lineno, None, smart_text(stripped_fmatch), []
                    intrans = True
                    inplural = False
                    trimmed = 'trimmed' in t.split_contents()
                    singular = []
                    plural = []
                elif cmatches:
                    for cmatch in cmatches:
                        stripped_cmatch = strip_quotes(cmatch)
                        yield lineno, None, smart_text(stripped_cmatch), []
            elif t.token_type == TOKEN_VAR:
                parts = t.contents.split('|')
                cmatch = constant_re.match(parts[0])
                if cmatch:
                    stripped_cmatch = strip_quotes(cmatch.group(1))
                    yield lineno, None, smart_text(stripped_cmatch), []
                for p in parts[1:]:
                    if p.find(':_(') >= 0:
                        p1 = p.split(':', 1)[1]
                        if p1[0] == '_':
                            p1 = p1[1:]
                        if p1[0] == '(':
                            p1 = p1.strip('()')
                        p1 = strip_quotes(p1)
                        yield lineno, None, smart_text(p1), []
Example #15
                {% else %}
                    <span class="mini quiet">{% trans 'Unknown content' %}</span>
                {% endif %}
            </li>
            {% endfor %}
            </ul>
            {% endif %}
    </div>
</div>
{% endblock %}
"""

settings.configure()

apps.populate((
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
))

elapsed = 0
lexer = Lexer(template_source, None)
tokens = lexer.tokenize()

for i in xrange(500):
    parser = Parser(list(tokens))
    parser.parse()
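
Passing list(tokens) on every iteration matters here: Django's Parser consumes its token list as it parses (tokens are popped off one by one), so each benchmark run needs a fresh copy of the tokenized template.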
Example #16
    def lines(self):
        source_lines = set()

        if SHOW_PARSING:
            print("-------------- {}".format(self.filename))

        if django.VERSION >= (1, 9):
            lexer = Lexer(self.source())
        else:
            lexer = Lexer(self.source(), self.filename)
        tokens = lexer.tokenize()

        # Are we inside a comment?
        comment = False
        # Is this a template that extends another template?
        extends = False
        # Are we inside a block?
        inblock = False

        for token in tokens:
            if SHOW_PARSING:
                print("%10s %2d: %r" % (
                    _token_name(token.token_type),
                    token.lineno,
                    token.contents,
                ))
            if token.token_type == TokenType.BLOCK:
                if token.contents == "endcomment":
                    comment = False
                    continue

            if comment:
                continue

            if token.token_type == TokenType.BLOCK:
                if token.contents.startswith("endblock"):
                    inblock = False
                elif token.contents.startswith("block"):
                    inblock = True
                    if extends:
                        continue

                if extends and not inblock:
                    # In an inheriting template, ignore all tags outside of
                    # blocks.
                    continue

                if token.contents == "comment":
                    comment = True
                if token.contents.startswith("end"):
                    continue
                elif token.contents in ("else", "empty"):
                    continue
                elif token.contents.startswith("elif"):
                    # NOTE: I don't like this; I want to be able to trace elif
                    # nodes, but the Django template engine doesn't track them
                    # in a way that we can get useful information from them.
                    continue
                elif token.contents.startswith("extends"):
                    extends = True

                source_lines.add(token.lineno)

            elif token.token_type == TokenType.VAR:
                source_lines.add(token.lineno)

            elif token.token_type == TokenType.TEXT:
                if extends and not inblock:
                    continue
                # Text nodes often start with newlines, but we don't want to
                # consider that first line to be part of the text.
                lineno = token.lineno
                lines = token.contents.splitlines(True)
                num_lines = len(lines)
                if lines[0].isspace():
                    lineno += 1
                    num_lines -= 1
                source_lines.update(range(lineno, lineno + num_lines))

            if SHOW_PARSING:
                print("\t\t\tNow source_lines is: {!r}".format(source_lines))

        return source_lines