Example No. 1
def get_jinja_locals(real_locals):
    ctx = real_locals.get('context')
    if ctx:
        locals = ctx.get_all().copy()
    else:
        locals = {}

    local_overrides = {}

    for name, value in iteritems(real_locals):
        if not name.startswith('l_') or value is missing:
            continue
        try:
            _, depth, name = name.split('_', 2)
            depth = int(depth)
        except ValueError:
            continue
        cur_depth = local_overrides.get(name, (-1,))[0]
        if cur_depth < depth:
            local_overrides[name] = (depth, value)

    for name, (_, value) in iteritems(local_overrides):
        if value is missing:
            locals.pop(name, None)
        else:
            locals[name] = value

    return locals
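A minimal, self-contained sketch of the name mangling this helper undoes: compiled Jinja2 code keeps template locals in the Python frame as l_<depth>_<name>, and the deepest depth wins. The iteritems/missing stubs below are assumptions added so the snippet can run next to the function above; they are not part of the original module.

missing = object()             # stand-in for jinja2's `missing` sentinel


def iteritems(d):              # stand-in for the py2/py3 compat helper
    return d.items()


real_locals = {
    'context': None,           # no template context captured in this frame
    'l_0_item': 'outer',       # depth 0 value of {{ item }}
    'l_1_item': 'inner',       # depth 1 shadows it inside a nested scope
}
print(get_jinja_locals(real_locals))  # expected: {'item': 'inner'}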
Example No. 2
    def single_rest(self, ini_file):

        meta = Meta()
        resources = []
        sections = {}

        if not os.path.exists(ini_file):
            logger.warning("File '%s' does not exist! Skipping." % ini_file)
            return resources

        #########################
        # Read the configuration inside this init file
        # INI CASE
        try:
            sections = self.read_config(ini_file)
        except configparser.MissingSectionHeaderError:
            logger.warning("'%s' file is not in base format" % ini_file)
            # print(e)  # DEBUG?
            # JSON CASE
            try:
                sections = self.read_complex_config(ini_file)
            except json.JSONLibraryException:  # assumes commentjson was imported as json
                logger.critical(
                    "Failed to read also complex format too." +
                    "\nPlease verify that your file is in " +
                    "'ini' or 'json' format!")
                exit(1)

        #########################
        # Use sections found
        for section, items in iteritems(sections):

            logger.info("Configuration read: {Section: " + section + "}")

            module = meta.get_module_from_string(
                __package__ + '.resources.' + section)
            # Skip what you cannot use
            if module is None:
                logger.warning("Could not find module '%s'..." % section)
                continue

            for classname, endpoints in iteritems(items):
                myclass = meta.get_class_from_string(classname, module)
                # Again skip
                if myclass is None:
                    continue
                else:
                    logger.debug("REST! Found resource: " +
                                 section + '.' + classname)

                # Get the best endpoint comparing inside against configuration
                instance = myclass()

                oldendpoint, endkey = instance.get_endpoint()
                if len(endpoints) < 1:
                    endpoints = [oldendpoint]
                resources.append((myclass, instance, endpoints, endkey))

        return resources
Example No. 3
    def signature(self, node, frame, extra_kwargs=None):
        """Writes a function call to the stream for the current node.
        A leading comma is added automatically.  The extra keyword
        arguments may not include python keywords otherwise a syntax
        error could occour.  The extra keyword arguments should be given
        as python dict.
        """
        # if any of the given keyword arguments is a python keyword
        # we have to make sure that no invalid call is created.
        kwarg_workaround = False
        for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()):
            if is_python_keyword(kwarg):
                kwarg_workaround = True
                break

        for arg in node.args:
            self.write(', ')
            self.visit(arg, frame)

        if not kwarg_workaround:
            for kwarg in node.kwargs:
                self.write(', ')
                self.visit(kwarg, frame)
            if extra_kwargs is not None:
                for key, value in iteritems(extra_kwargs):
                    self.write(', %s=%s' % (key, value))
        if node.dyn_args:
            self.write(', *')
            self.visit(node.dyn_args, frame)

        if kwarg_workaround:
            if node.dyn_kwargs is not None:
                self.write(', **dict({')
            else:
                self.write(', **{')
            for kwarg in node.kwargs:
                self.write('%r: ' % kwarg.key)
                self.visit(kwarg.value, frame)
                self.write(', ')
            if extra_kwargs is not None:
                for key, value in iteritems(extra_kwargs):
                    self.write('%r: %s, ' % (key, value))
            if node.dyn_kwargs is not None:
                self.write('}, **')
                self.visit(node.dyn_kwargs, frame)
                self.write(')')
            else:
                self.write('}')

        elif node.dyn_kwargs is not None:
            self.write(', **')
            self.visit(node.dyn_kwargs, frame)
Example No. 4
    def branch_update(self, branch_symbols):
        stores = {}
        for branch in branch_symbols:
            for target in branch.stores:
                if target in self.stores:
                    continue
                stores[target] = stores.get(target, 0) + 1

        for sym in branch_symbols:
            self.refs.update(sym.refs)
            self.loads.update(sym.loads)
            self.stores.update(sym.stores)

        for name, branch_count in iteritems(stores):
            if branch_count == len(branch_symbols):
                continue
            target = self.find_ref(name)
            assert target is not None, 'should not happen'

            if self.parent is not None:
                outer_target = self.parent.find_ref(name)
                if outer_target is not None:
                    self.loads[target] = (VAR_LOAD_ALIAS, outer_target)
                    continue
            self.loads[target] = (VAR_LOAD_RESOLVE, name)
Example No. 5
def do_xmlattr(_eval_ctx, d, autospace=True):
    """Create an SGML/XML attribute string based on the items in a dict.
    All values that are neither `none` nor `undefined` are automatically
    escaped:

    .. sourcecode:: html+jinja

        <ul{{ {'class': 'my_list', 'missing': none,
                'id': 'list-%d'|format(variable)}|xmlattr }}>
        ...
        </ul>

    Results in something like this:

    .. sourcecode:: html

        <ul class="my_list" id="list-42">
        ...
        </ul>

    As you can see, it automatically prepends a space in front of the item
    if the filter returned something, unless the second parameter is false.
    """
    rv = u' '.join(
        u'%s="%s"' % (escape(key), escape(value))
        for key, value in iteritems(d)
        if value is not None and not isinstance(value, Undefined)
    )
    if autospace and rv:
        rv = u' ' + rv
    if _eval_ctx.autoescape:
        rv = Markup(rv)
    return rv
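A hedged usage sketch of the filter through a plain Jinja2 environment; the template text and attribute values are made up for illustration.

from jinja2 import Environment

env = Environment(autoescape=True)
tmpl = env.from_string(
    "<ul{{ {'class': 'my_list', 'missing': none, 'id': 'list-%d'|format(42)}|xmlattr }}></ul>")
print(tmpl.render())  # <ul class="my_list" id="list-42"></ul>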
Example No. 6
    def rest(self):
        """ REST endpoints from '.ini' files """

        logger.debug("Trying configurations from '%s' dir" % REST_CONFIG)

        files = []
        if os.path.exists(REST_INIT):
            import commentjson as json
            with open(REST_INIT) as f:
                mydict = json.load(f)
                for name, jfile in iteritems(mydict):
                    files.append(os.path.join(REST_CONFIG, jfile))
        # What if the user does not specify anything?
        else:
            # # ALL ?
            # logger.debug("Reading all resources config files")
            # import glob
            # files = glob.glob(os.path.join(REST_CONFIG, "*") + ".ini")

            # # ONLY THE EXAMPLE
            files.append(os.path.join(REST_CONFIG, DEFAULT_REST_CONFIG))
        logger.debug("Resources files: '%s'" % files)

        resources = []
        for ini_file in files:
            logger.info("REST configuration file '%s'" % ini_file)
            # Add all resources from this single ini file
            resources.extend(self.single_rest(ini_file))

        return resources
Example No. 7
 def derived(self, locals=None):
     """Internal helper function to create a derived context."""
     context = new_context(self.environment, self.name, {},
                           self.parent, True, None, locals)
     context.vars.update(self.vars)
     context.eval_ctx = self.eval_ctx
     context.blocks.update((k, list(v)) for k, v in iteritems(self.blocks))
     return context
Example No. 8
 def test_operators(self):
     from jinja2.lexer import operators
     for test, expect in iteritems(operators):
         if test in '([{}])':
             continue
         stream = env.lexer.tokenize('{{{{ {0!s} }}}}'.format(test))
         next(stream)
         assert stream.current.type == expect
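For comparison, a small sketch that drives the lexer through the public API on a plain Environment instead of calling env.lexer.tokenize directly.

from jinja2 import Environment

env = Environment()
for lineno, token_type, value in env.lex('{{ a == b }}'):
    print(lineno, token_type, value)  # tuples of (lineno, token_type, value)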
Example No. 9
 def extend(self, **attributes):
     """Add the items to the instance of the environment if they do not exist
     yet.  This is used by :ref:`extensions <writing-extensions>` to register
     callbacks and configuration values without breaking inheritance.
     """
     for key, value in iteritems(attributes):
         if not hasattr(self, key):
             setattr(self, key, value)
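A minimal sketch of the behavior; the attribute name fribble_mode is purely illustrative and not part of any real extension.

from jinja2 import Environment

env = Environment()
env.extend(fribble_mode='fast')   # set: the attribute does not exist yet
env.extend(fribble_mode='slow')   # ignored: the attribute is already present
print(env.fribble_mode)           # fast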
Example No. 10
 def dump_param_targets(self):
     rv = set()
     node = self
     while node is not None:
          for target, (instr, _) in iteritems(node.loads):
             if instr == VAR_LOAD_PARAMETER:
                 rv.add(target)
         node = node.parent
     return rv
Example No. 11
    def overlay(self, block_start_string=missing, block_end_string=missing,
                variable_start_string=missing, variable_end_string=missing,
                comment_start_string=missing, comment_end_string=missing,
                line_statement_prefix=missing, line_comment_prefix=missing,
                trim_blocks=missing, lstrip_blocks=missing,
                extensions=missing, optimized=missing,
                undefined=missing, finalize=missing, autoescape=missing,
                loader=missing, cache_size=missing, auto_reload=missing,
                bytecode_cache=missing):
        """Create a new overlay environment that shares all the data with the
        current environment except for cache and the overridden attributes.
        Extensions cannot be removed for an overlayed environment.  An overlayed
        environment automatically gets all the extensions of the environment it
        is linked to plus optional extra extensions.

        Creating overlays should happen after the initial environment was set
        up completely.  Not all attributes are truly linked, some are just
        copied over so modifications on the original environment may not shine
        through.
        """
        args = dict(locals())
        del args['self'], args['cache_size'], args['extensions']

        rv = object.__new__(self.__class__)
        rv.__dict__.update(self.__dict__)
        rv.overlayed = True
        rv.linked_to = self

        for key, value in iteritems(args):
            if value is not missing:
                setattr(rv, key, value)

        if cache_size is not missing:
            rv.cache = create_cache(cache_size)
        else:
            rv.cache = copy_cache(self.cache)

        rv.extensions = {}
        for key, value in iteritems(self.extensions):
            rv.extensions[key] = value.bind(rv)
        if extensions is not missing:
            rv.extensions.update(load_extensions(rv, extensions))

        return _environment_sanity_check(rv)
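A quick sketch of overlaying a configured environment; nothing here is specific to the project above, only the public Environment.overlay API shown in the method is exercised.

from jinja2 import Environment

base = Environment(trim_blocks=False)
overlay = base.overlay(trim_blocks=True)
print(base.trim_blocks, overlay.trim_blocks)  # False True
print(overlay.linked_to is base)              # True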
Example No. 12
 def derived(self, locals=None):
     """Internal helper function to create a derived context.  This is
     used in situations where the system needs a new context in the same
     template that is independent.
     """
     context = new_context(self.environment, self.name, {},
                           self.get_all(), True, None, locals)
     context.eval_ctx = self.eval_ctx
     context.blocks.update((k, list(v)) for k, v in iteritems(self.blocks))
     return context
Example No. 13
    def __init__(self, environment, parent, name, blocks):
        self.parent = parent
        self.vars = {}
        self.environment = environment
        self.eval_ctx = EvalContext(self.environment, name)
        self.exported_vars = set()
        self.name = name

        # create the initial mapping of blocks.  Whenever template inheritance
        # takes place the runtime will update this mapping with the new blocks
        # from the template.
        self.blocks = dict((k, [v]) for k, v in iteritems(blocks))
Example No. 14
 def pop_scope(self, aliases, frame):
     """Restore all aliases and delete unused variables."""
     for name, alias in iteritems(aliases):
         self.writeline('l_%s = %s' % (name, alias))
     to_delete = set()
     for name in frame.identifiers.declared_locally:
         if name not in aliases:
             to_delete.add('l_' + name)
     if to_delete:
         # we cannot use the del statement here because enclosed
         # scopes can trigger a SyntaxError:
         #   a = 42; b = lambda: a; del a
         self.writeline(' = '.join(to_delete) + ' = missing')
Example No. 15
    def _make_node(self, singular, plural, variables, plural_expr,
                   vars_referenced, num_called_num):
        """Generates a useful node from the data provided."""
        # no variables referenced?  no need to escape for old style
        # gettext invocations only if there are vars.
        if not vars_referenced and not self.environment.newstyle_gettext:
            singular = singular.replace('%%', '%')
            if plural:
                plural = plural.replace('%%', '%')

        # singular only:
        if plural_expr is None:
            gettext = nodes.Name('gettext', 'load')
            node = nodes.Call(gettext, [nodes.Const(singular)],
                              [], None, None)

        # singular and plural
        else:
            ngettext = nodes.Name('ngettext', 'load')
            node = nodes.Call(ngettext, [
                nodes.Const(singular),
                nodes.Const(plural),
                plural_expr
            ], [], None, None)

        # in case newstyle gettext is used, the method is powerful
        # enough to handle the variable expansion and autoescape
        # handling itself
        if self.environment.newstyle_gettext:
            for key, value in iteritems(variables):
                # the function adds that later anyways in case num was
                # called num, so just skip it.
                if num_called_num and key == 'num':
                    continue
                node.kwargs.append(nodes.Keyword(key, value))

        # otherwise do that here
        else:
            # mark the return value as safe if we are in an
            # environment with autoescaping turned on
            node = nodes.MarkSafeIfAutoescape(node)
            if variables:
                node = nodes.Mod(node, nodes.Dict([
                    nodes.Pair(nodes.Const(key), value)
                    for key, value in variables.items()
                ]))
        return nodes.Output([node])
Example No. 16
    def __init__(self, environment, parent, name, blocks):
        self.parent = parent
        self.vars = {}
        self.environment = environment
        self.eval_ctx = EvalContext(self.environment, name)
        self.exported_vars = set()
        self.name = name

        # create the initial mapping of blocks.  Whenever template inheritance
        # takes place the runtime will update this mapping with the new blocks
        # from the template.
        self.blocks = dict((k, [v]) for k, v in iteritems(blocks))

        # In case we detect the fast resolve mode we can set up an alias
        # here that bypasses the legacy code logic.
        if self._fast_resolve_mode:
            self.resolve_or_missing = MethodType(resolve_or_missing, self)
Example No. 17
    def __init__(self, environment, parent, name, blocks):
        self.parent = parent
        self.vars = {}
        self.environment = environment
        self.eval_ctx = EvalContext(self.environment, name)
        self.exported_vars = set()
        self.name = name

        # create the initial mapping of blocks.  Whenever template inheritance
        # takes place the runtime will update this mapping with the new blocks
        # from the template.
        self.blocks = dict((k, [v]) for k, v in iteritems(blocks))

        # In case we detect the fast resolve mode we can set up an alias
        # here that bypasses the legacy code logic.
        if self._fast_resolve_mode:
            self.resolve_or_missing = MethodType(resolve_or_missing, self)
Example No. 18
def do_urlencode(value):
    """Escape strings for use in URLs (uses UTF-8 encoding).  It accepts both
    dictionaries and regular strings as well as pairwise iterables.

    .. versionadded:: 2.7
    """
    itemiter = None
    if isinstance(value, dict):
        itemiter = iteritems(value)
    elif not isinstance(value, string_types):
        try:
            itemiter = iter(value)
        except TypeError:
            pass
    if itemiter is None:
        return unicode_urlencode(value)
    return u'&'.join(
        unicode_urlencode(k) + '=' + unicode_urlencode(v) for k, v in itemiter)
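A hedged sketch of exercising the filter from a template; the query values are invented and the encoded output in the comment is approximate.

from jinja2 import Environment

env = Environment()
print(env.from_string("{{ {'q': 'jinja templates', 'page': 1}|urlencode }}").render())
# roughly: q=jinja%20templates&page=1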
Example No. 19
def new_context(environment, template_name, blocks, vars=None,
                shared=None, globals=None, locals=None):
    """Internal helper to for context creation."""
    if vars is None:
        vars = {}
    if shared:
        parent = vars
    else:
        parent = dict(globals or (), **vars)
    if locals:
        # if the parent is shared a copy should be created because
        # we don't want to modify the dict passed
        if shared:
            parent = dict(parent)
        for key, value in iteritems(locals):
            if key[:2] == 'l_' and value is not missing:
                parent[key[2:]] = value
    return Context(environment, parent, template_name, blocks)
Example No. 21
File: filters.py Project: 5y/jinja2
def do_urlencode(value):
    """Escape strings for use in URLs (uses UTF-8 encoding).  It accepts both
    dictionaries and regular strings as well as pairwise iterables.

    .. versionadded:: 2.7
    """
    itemiter = None
    if isinstance(value, dict):
        itemiter = iteritems(value)
    elif not isinstance(value, string_types):
        try:
            itemiter = iter(value)
        except TypeError:
            pass
    if itemiter is None:
        return unicode_urlencode(value)
    return u'&'.join(unicode_urlencode(k) + '=' +
                     unicode_urlencode(v) for k, v in itemiter)
Example No. 22
    def single_rest(self, ini_file):

        meta = Meta()
        resources = []

        if not os.path.exists(ini_file):
            logger.warning("File '%s' does not exist! Skipping." % ini_file)
            return resources

        # Read the configuration inside this init file
        config = self.read_config(ini_file)

        for section in config.sections():

            logger.info("Configuration read: {Section: " + section + "}")

            module = meta.get_module_from_string(__package__ + '.resources.' +
                                                 section)
            # Skip what you cannot use
            if module is None:
                logger.warning("Could not find module '%s'..." % section)
                continue

            for classname, endpoint in iteritems(dict(config.items(section))):

                myclass = meta.get_class_from_string(classname, module)
                # Again skip
                if myclass is None:
                    continue
                else:
                    logger.debug("REST! Found resource: " + section + '.' +
                                 classname)

                # Get the best endpoint comparing inside against configuration
                instance = myclass()
                oldendpoint, endkey = instance.get_endpoint()
                if endpoint.strip() == '':
                    endpoint = oldendpoint

                resources.append((myclass, instance, endpoint, endkey))

        return resources
Example No. 23
    def single_rest(self, ini_file):

        meta = Meta()
        resources = []

        if not os.path.exists(ini_file):
            logger.warning("File '%s' does not exist! Skipping." % ini_file)
            return resources

        # Read the configuration inside this init file
        config = self.read_config(ini_file)

        for section in config.sections():

            logger.info("Configuration read: {Section: " + section + "}")

            module = meta.get_module_from_string(
                __package__ + '.resources.' + section)
            # Skip what you cannot use
            if module is None:
                logger.warning("Could not find module '%s'..." % section)
                continue

            for classname, endpoint in iteritems(dict(config.items(section))):

                myclass = meta.get_class_from_string(classname, module)
                # Again skip
                if myclass is None:
                    continue
                else:
                    logger.debug("REST! Found resource: " +
                                 section + '.' + classname)

                # Get the best endpoint comparing inside against configuration
                instance = myclass()
                oldendpoint, endkey = instance.get_endpoint()
                if endpoint.strip() == '':
                    endpoint = oldendpoint

                resources.append((myclass, instance, endpoint, endkey))

        return resources
Example No. 24
def has_safe_repr(value):
    """Does the node have a safe representation?"""
    if value is None or value is NotImplemented or value is Ellipsis:
        return True
    if isinstance(value, (bool, int, float, complex, range_type,
            Markup) + string_types):
        return True
    if isinstance(value, (tuple, list, set, frozenset)):
        for item in value:
            if not has_safe_repr(item):
                return False
        return True
    elif isinstance(value, dict):
        for key, value in iteritems(value):
            if not has_safe_repr(key):
                return False
            if not has_safe_repr(value):
                return False
        return True
    return False
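Two illustrative calls against the predicate above; they assume the names the function itself relies on (Markup, string_types, range_type) are importable in the same module.

print(has_safe_repr({'answer': 42, 'nested': (1, 2.5, 'x')}))  # True: only literals inside
print(has_safe_repr({'handler': object()}))                    # False: arbitrary objects have no safe repr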
Example No. 25
    def read_config(self, configfile, case_sensitive=True):
        """ A generic reader for 'ini' files via standard library """

        sections = {}

        if case_sensitive:
            # Make sure configuration is case sensitive
            config = configparser.RawConfigParser()
            config.optionxform = str
        else:
            config = configparser.ConfigParser()

        # Read
        config.read(configfile)
        for section in config.sections():
            print(section)
            elements = {}
            for classname, endpoint in iteritems(dict(config.items(section))):
                print(classname, endpoint)
                elements[classname] = [endpoint]
            sections[str(section)] = elements

        return sections
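A self-contained sketch of the case-sensitivity trick used above: assigning str to optionxform keeps option names exactly as written in the file. The section and option names below are invented.

import configparser

config = configparser.RawConfigParser()
config.optionxform = str            # keep option (class) names case sensitive
config.read_string("[users]\nUserEndpoint = /api/users\n")
print(dict(config.items('users')))  # {'UserEndpoint': '/api/users'}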
Example No. 26
 def list_templates(self):
     result = []
     for prefix, loader in iteritems(self.mapping):
         for template in loader.list_templates():
             result.append(prefix + self.delimiter + template)
     return result
Example No. 27
 def list_templates(self):
     result = []
     for prefix, loader in iteritems(self.mapping):
         for template in loader.list_templates():
             result.append(prefix + self.delimiter + template)
     return result
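This appears to be PrefixLoader.list_templates from Jinja2's loaders; a small sketch of what it produces, with made-up prefixes and template names.

from jinja2 import DictLoader, PrefixLoader

loader = PrefixLoader({
    'app': DictLoader({'index.html': 'app index'}),
    'admin': DictLoader({'users.html': 'admin users'}),
})
print(loader.list_templates())  # ['app/index.html', 'admin/users.html']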
Example No. 28
    def single_rest(self, config_file):

        meta = Meta()
        resources = []
        sections = {}

        if not os.path.exists(config_file):
            logger.warning("File '%s' does not exist! Skipping." % config_file)
            return resources

        #########################
        # Read the configuration inside this init file

        # JSON CASE
        try:
            sections = self.read_complex_config(config_file)
        except:  # json.commentjson.JSONLibraryException:
            logger.critical("Format error!\n" +
                            "'%s' file is not in JSON format" % config_file)
            exit(1)

        if 'apis' not in sections:
            logger.critical(
                "Section 'apis' not found in '%s' file" % config_file
            )
            exit(1)

        #########################
        # Use sections found
        for section, items in iteritems(sections['apis']):

            logger.debug("Configuration read: {Section: " + section + "}")

            module = meta.get_module_from_string(
                __package__ + '.resources.custom.' + section)
            # Skip what you cannot use
            if module is None:
                logger.warning("Could not find module '%s'..." % section)
                continue

            for classname, endpoints in iteritems(items):
                myclass = meta.get_class_from_string(classname, module)
                # Again skip
                if myclass is None:
                    continue
                else:
                    logger.debug("REST! Found resource: " +
                                 section + '.' + classname)

                # Get the best endpoint comparing inside against configuration
                instance = myclass()

                oldendpoint, endkey, endtype = instance.get_endpoint()
                if len(endpoints) < 1:
                    endpoints = [oldendpoint]

                endpoint_id = None
                if endkey is not None and endtype is not None:
                    endpoint_id = endtype + ':' + endkey

                resources.append((myclass, instance, endpoints, endpoint_id))

        return resources
Example No. 29
def fake_exc_info(exc_info, filename, lineno):
    """Helper for `translate_exception`."""
    exc_type, exc_value, tb = exc_info

    # figure the real context out
    if tb is not None:
        real_locals = tb.tb_frame.f_locals.copy()
        ctx = real_locals.get('context')
        if ctx:
            locals = ctx.get_all()
        else:
            locals = {}
        for name, value in iteritems(real_locals):
            if name.startswith('l_') and value is not missing:
                locals[name[2:]] = value

        # if there is a local called __jinja_exception__, we get
        # rid of it to not break the debug functionality.
        locals.pop('__jinja_exception__', None)
    else:
        locals = {}

    # assemble the fake globals we need
    globals = {
        '__name__': filename,
        '__file__': filename,
        '__jinja_exception__': exc_info[:2],

        # we don't want to keep the reference to the template around
        # to not cause circular dependencies, but we mark it as Jinja
        # frame for the ProcessedTraceback
        '__jinja_template__': None
    }

    # and fake the exception
    code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec')

    # if it's possible, change the name of the code.  This won't work
    # on some python environments such as google appengine
    try:
        if tb is None:
            location = 'template'
        else:
            function = tb.tb_frame.f_code.co_name
            if function == 'root':
                location = 'top-level template code'
            elif function.startswith('block_'):
                location = 'block "%s"' % function[6:]
            else:
                location = 'template'
        code = code_type(0, code.co_nlocals, code.co_stacksize, code.co_flags,
                         code.co_code, code.co_consts, code.co_names,
                         code.co_varnames, filename, location,
                         code.co_firstlineno, code.co_lnotab, (), ())
    except:
        pass

    # execute the code and catch the new traceback
    try:
        exec(code, globals, locals)
    except:
        exc_info = sys.exc_info()
        new_tb = exc_info[2].tb_next

    # return without this frame
    return exc_info[:2] + (new_tb, )
Example No. 30
    def tokeniter(self, source, name, filename=None, state=None):
        """This method tokenizes the text and returns the tokens in a
        generator.  Use this method if you just want to tokenize a template.
        """
        source = text_type(source)
        lines = source.splitlines()
        if self.keep_trailing_newline and source:
            for newline in ('\r\n', '\r', '\n'):
                if source.endswith(newline):
                    lines.append('')
                    break
        source = '\n'.join(lines)
        pos = 0
        lineno = 1
        stack = ['root']
        if state is not None and state != 'root':
            assert state in ('variable', 'block'), 'invalid state'
            stack.append(state + '_begin')
        else:
            state = 'root'
        statetokens = self.rules[stack[-1]]
        source_length = len(source)

        balancing_stack = []

        while 1:
            # tokenizer loop
            for regex, tokens, new_state in statetokens:
                m = regex.match(source, pos)
                # if no match we try again with the next rule
                if m is None:
                    continue

                # we only match blocks and variables if braces / parentheses
                # are balanced. continue parsing with the lower rule which
                # is the operator rule. do this only if the end tags look
                # like operators
                if balancing_stack and \
                   tokens in ('variable_end', 'block_end',
                              'linestatement_end'):
                    continue

                # tuples support more options
                if isinstance(tokens, tuple):
                    for idx, token in enumerate(tokens):
                        # failure group
                        if token.__class__ is Failure:
                            raise token(lineno, filename)
                        # bygroup is a bit more complex, in that case we
                        # yield for the current token the first named
                        # group that matched
                        elif token == '#bygroup':
                            for key, value in iteritems(m.groupdict()):
                                if value is not None:
                                    yield lineno, key, value
                                    lineno += value.count('\n')
                                    break
                            else:
                                raise RuntimeError('%r wanted to resolve '
                                                   'the token dynamically'
                                                   ' but no group matched'
                                                   % regex)
                        # normal group
                        else:
                            data = m.group(idx + 1)
                            if data or token not in ignore_if_empty:
                                yield lineno, token, data
                            lineno += data.count('\n')

                # plain strings as tokens are just yielded as-is.
                else:
                    data = m.group()
                    # update brace/parentheses balance
                    if tokens == 'operator':
                        if data == '{':
                            balancing_stack.append('}')
                        elif data == '(':
                            balancing_stack.append(')')
                        elif data == '[':
                            balancing_stack.append(']')
                        elif data in ('}', ')', ']'):
                            if not balancing_stack:
                                raise TemplateSyntaxError('unexpected \'%s\'' %
                                                          data, lineno, name,
                                                          filename)
                            expected_op = balancing_stack.pop()
                            if expected_op != data:
                                raise TemplateSyntaxError('unexpected \'%s\', '
                                                          'expected \'%s\'' %
                                                          (data, expected_op),
                                                          lineno, name,
                                                          filename)
                    # yield items
                    if data or tokens not in ignore_if_empty:
                        yield lineno, tokens, data
                    lineno += data.count('\n')

                # fetch new position into new variable so that we can check
                # if there is an internal parsing error which would result
                # in an infinite loop
                pos2 = m.end()

                # handle state changes
                if new_state is not None:
                    # remove the uppermost state
                    if new_state == '#pop':
                        stack.pop()
                    # resolve the new state by group checking
                    elif new_state == '#bygroup':
                        for key, value in iteritems(m.groupdict()):
                            if value is not None:
                                stack.append(key)
                                break
                        else:
                            raise RuntimeError('%r wanted to resolve the '
                                               'new state dynamically but'
                                               ' no group matched' %
                                               regex)
                    # direct state name given
                    else:
                        stack.append(new_state)
                    statetokens = self.rules[stack[-1]]
                # we are still at the same position and no stack change.
                # this means a loop without break condition, avoid that and
                # raise error
                elif pos2 == pos:
                    raise RuntimeError('%r yielded empty string without '
                                       'stack change' % regex)
                # publish new function and start again
                pos = pos2
                break
            # if loop terminated without break we haven't found a single match
            # either we are at the end of the file or we have a problem
            else:
                # end of text
                if pos >= source_length:
                    return
                # something went wrong
                raise TemplateSyntaxError('unexpected char %r at %d' %
                                          (source[pos], pos), lineno,
                                          name, filename)
Example No. 31
    '}':            TOKEN_RBRACE,
    '==':           TOKEN_EQ,
    '!=':           TOKEN_NE,
    '>':            TOKEN_GT,
    '>=':           TOKEN_GTEQ,
    '<':            TOKEN_LT,
    '<=':           TOKEN_LTEQ,
    '=':            TOKEN_ASSIGN,
    '.':            TOKEN_DOT,
    ':':            TOKEN_COLON,
    '|':            TOKEN_PIPE,
    ',':            TOKEN_COMMA,
    ';':            TOKEN_SEMICOLON
}

reverse_operators = dict([(v, k) for k, v in iteritems(operators)])
assert len(operators) == len(reverse_operators), 'operators dropped'
operator_re = re.compile('(%s)' % '|'.join(re.escape(x) for x in
                         sorted(operators, key=lambda x: -len(x))))

ignored_tokens = frozenset([TOKEN_COMMENT_BEGIN, TOKEN_COMMENT,
                            TOKEN_COMMENT_END, TOKEN_WHITESPACE,
                            TOKEN_WHITESPACE, TOKEN_LINECOMMENT_BEGIN,
                            TOKEN_LINECOMMENT_END, TOKEN_LINECOMMENT])
ignore_if_empty = frozenset([TOKEN_WHITESPACE, TOKEN_DATA,
                             TOKEN_COMMENT, TOKEN_LINECOMMENT])


def _describe_token_type(token_type):
    if token_type in reverse_operators:
        return reverse_operators[token_type]
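The reverse-mapping pattern in isolation, detached from the Jinja2 token constants; the toy operator table below is illustrative only.

operators = {'==': 'eq', '!=': 'ne', '>=': 'gteq'}
reverse_operators = {v: k for k, v in operators.items()}
assert len(operators) == len(reverse_operators), 'operators dropped'
print(reverse_operators['gteq'])  # '>='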
Example No. 32
 def enter_frame(self, frame):
     """Remember all undeclared identifiers."""
     CodeGenerator.enter_frame(self, frame)
     for _, (action, param) in iteritems(frame.symbols.loads):
         if action == 'resolve':
             self.undeclared_identifiers.add(param)
Example No. 33
    def visit_Template(self, node, frame=None):
        assert frame is None, 'no root frame allowed'
        eval_ctx = EvalContext(self.environment, self.name)

        from jinja2.runtime import __all__ as exported
        self.writeline('from __future__ import division')
        self.writeline('from jinja2.runtime import ' + ', '.join(exported))
        if not unoptimize_before_dead_code:
            self.writeline('dummy = lambda *x: None')

        # if we want a deferred initialization we cannot move the
        # environment into a local name
        envenv = not self.defer_init and ', environment=environment' or ''

        # do we have an extends tag at all?  If not, we can save some
        # overhead by just not processing any inheritance code.
        have_extends = node.find(nodes.Extends) is not None

        # find all blocks
        for block in node.find_all(nodes.Block):
            if block.name in self.blocks:
                self.fail('block %r defined twice' % block.name, block.lineno)
            self.blocks[block.name] = block

        # find all imports and import them
        for import_ in node.find_all(nodes.ImportedName):
            if import_.importname not in self.import_aliases:
                imp = import_.importname
                self.import_aliases[imp] = alias = self.temporary_identifier()
                if '.' in imp:
                    module, obj = imp.rsplit('.', 1)
                    self.writeline('from %s import %s as %s' %
                                   (module, obj, alias))
                else:
                    self.writeline('import %s as %s' % (imp, alias))

        # add the load name
        self.writeline('name = %r' % self.name)

        # generate the root render function.
        self.writeline('def root(context%s):' % envenv, extra=1)

        # process the root
        frame = Frame(eval_ctx)
        frame.inspect(node.body)
        frame.toplevel = frame.rootlevel = True
        frame.require_output_check = have_extends and not self.has_known_extends
        self.indent()
        if have_extends:
            self.writeline('parent_template = None')
        if 'self' in find_undeclared(node.body, ('self',)):
            frame.identifiers.add_special('self')
            self.writeline('l_self = TemplateReference(context)')
        self.pull_locals(frame)
        self.pull_dependencies(node.body)
        self.blockvisit(node.body, frame)
        self.outdent()

        # make sure that the parent root is called.
        if have_extends:
            if not self.has_known_extends:
                self.indent()
                self.writeline('if parent_template is not None:')
            self.indent()
            self.writeline('for event in parent_template.'
                           'root_render_func(context):')
            self.indent()
            self.writeline('yield event')
            self.outdent(2 + (not self.has_known_extends))

        # at this point we now have the blocks collected and can visit them too.
        for name, block in iteritems(self.blocks):
            block_frame = Frame(eval_ctx)
            block_frame.inspect(block.body)
            block_frame.block = name
            self.writeline('def block_%s(context%s):' % (name, envenv),
                           block, 1)
            self.indent()
            undeclared = find_undeclared(block.body, ('self', 'super'))
            if 'self' in undeclared:
                block_frame.identifiers.add_special('self')
                self.writeline('l_self = TemplateReference(context)')
            if 'super' in undeclared:
                block_frame.identifiers.add_special('super')
                self.writeline('l_super = context.super(%r, '
                               'block_%s)' % (name, name))
            self.pull_locals(block_frame)
            self.pull_dependencies(block.body)
            self.blockvisit(block.body, block_frame)
            self.outdent()

        self.writeline('blocks = {%s}' % ', '.join('%r: block_%s' % (x, x)
                                                   for x in self.blocks),
                       extra=1)

        # add a function that returns the debug info
        self.writeline('debug_info = %r' % '&'.join('%s=%s' % x for x
                                                    in self.debug_info))
Example No. 34
    def tokeniter(self, source, name, filename=None, state=None):
        """This method tokenizes the text and returns the tokens in a
        generator.  Use this method if you just want to tokenize a template.
        """
        source = text_type(source)
        lines = source.splitlines()
        if self.keep_trailing_newline and source:
            for newline in ('\r\n', '\r', '\n'):
                if source.endswith(newline):
                    lines.append('')
                    break
        source = '\n'.join(lines)
        pos = 0
        lineno = 1
        stack = ['root']
        if state is not None and state != 'root':
            assert state in ('variable', 'block'), 'invalid state'
            stack.append(state + '_begin')
        else:
            state = 'root'
        statetokens = self.rules[stack[-1]]
        source_length = len(source)

        balancing_stack = []

        while 1:
            # tokenizer loop
            for regex, tokens, new_state in statetokens:
                m = regex.match(source, pos)
                # if no match we try again with the next rule
                if m is None:
                    continue

                # we only match blocks and variables if braces / parentheses
                # are balanced. continue parsing with the lower rule which
                # is the operator rule. do this only if the end tags look
                # like operators
                if balancing_stack and \
                   tokens in ('variable_end', 'block_end',
                              'linestatement_end'):
                    continue

                # tuples support more options
                if isinstance(tokens, tuple):
                    for idx, token in enumerate(tokens):
                        # failure group
                        if token.__class__ is Failure:
                            raise token(lineno, filename)
                        # bygroup is a bit more complex, in that case we
                        # yield for the current token the first named
                        # group that matched
                        elif token == '#bygroup':
                            for key, value in iteritems(m.groupdict()):
                                if value is not None:
                                    yield lineno, key, value
                                    lineno += value.count('\n')
                                    break
                            else:
                                raise RuntimeError('%r wanted to resolve '
                                                   'the token dynamically'
                                                   ' but no group matched' %
                                                   regex)
                        # normal group
                        else:
                            data = m.group(idx + 1)
                            if data or token not in ignore_if_empty:
                                yield lineno, token, data
                            lineno += data.count('\n')

                # plain strings as tokens are just yielded as-is.
                else:
                    data = m.group()
                    # update brace/parentheses balance
                    if tokens == 'operator':
                        if data == '{':
                            balancing_stack.append('}')
                        elif data == '(':
                            balancing_stack.append(')')
                        elif data == '[':
                            balancing_stack.append(']')
                        elif data in ('}', ')', ']'):
                            if not balancing_stack:
                                raise TemplateSyntaxError(
                                    'unexpected \'%s\'' % data, lineno, name,
                                    filename)
                            expected_op = balancing_stack.pop()
                            if expected_op != data:
                                raise TemplateSyntaxError(
                                    'unexpected \'%s\', '
                                    'expected \'%s\'' % (data, expected_op),
                                    lineno, name, filename)
                    # yield items
                    if data or tokens not in ignore_if_empty:
                        yield lineno, tokens, data
                    lineno += data.count('\n')

                # fetch new position into new variable so that we can check
                # if there is an internal parsing error which would result
                # in an infinite loop
                pos2 = m.end()

                # handle state changes
                if new_state is not None:
                    # remove the uppermost state
                    if new_state == '#pop':
                        stack.pop()
                    # resolve the new state by group checking
                    elif new_state == '#bygroup':
                        for key, value in iteritems(m.groupdict()):
                            if value is not None:
                                stack.append(key)
                                break
                        else:
                            raise RuntimeError('%r wanted to resolve the '
                                               'new state dynamically but'
                                               ' no group matched' % regex)
                    # direct state name given
                    else:
                        stack.append(new_state)
                    statetokens = self.rules[stack[-1]]
                # we are still at the same position and no stack change.
                # this means a loop without break condition, avoid that and
                # raise error
                elif pos2 == pos:
                    raise RuntimeError('%r yielded empty string without '
                                       'stack change' % regex)
                # publish new function and start again
                pos = pos2
                break
            # if loop terminated without break we haven't found a single match
            # either we are at the end of the file or we have a problem
            else:
                # end of text
                if pos >= source_length:
                    return
                # something went wrong
                raise TemplateSyntaxError(
                    'unexpected char %r at %d' % (source[pos], pos), lineno,
                    name, filename)
Example No. 35
    '}': TOKEN_RBRACE,
    '==': TOKEN_EQ,
    '!=': TOKEN_NE,
    '>': TOKEN_GT,
    '>=': TOKEN_GTEQ,
    '<': TOKEN_LT,
    '<=': TOKEN_LTEQ,
    '=': TOKEN_ASSIGN,
    '.': TOKEN_DOT,
    ':': TOKEN_COLON,
    '|': TOKEN_PIPE,
    ',': TOKEN_COMMA,
    ';': TOKEN_SEMICOLON
}

reverse_operators = dict([(v, k) for k, v in iteritems(operators)])
assert len(operators) == len(reverse_operators), 'operators dropped'
operator_re = re.compile(
    '(%s)' %
    '|'.join(re.escape(x) for x in sorted(operators, key=lambda x: -len(x))))

ignored_tokens = frozenset([
    TOKEN_COMMENT_BEGIN, TOKEN_COMMENT, TOKEN_COMMENT_END, TOKEN_WHITESPACE,
    TOKEN_LINECOMMENT_BEGIN, TOKEN_LINECOMMENT_END, TOKEN_LINECOMMENT
])
ignore_if_empty = frozenset(
    [TOKEN_WHITESPACE, TOKEN_DATA, TOKEN_COMMENT, TOKEN_LINECOMMENT])


def _describe_token_type(token_type):
    if token_type in reverse_operators:
Example No. 36
 def enter_frame(self, frame):
     """Remember all undeclared identifiers."""
     CodeGenerator.enter_frame(self, frame)
     for _, (action, param) in iteritems(frame.symbols.loads):
         if action == 'resolve':
             self.undeclared_identifiers.add(param)
Example No. 37
File: lexer.py Project: runt18/mojo
    '}':            TOKEN_RBRACE,
    '==':           TOKEN_EQ,
    '!=':           TOKEN_NE,
    '>':            TOKEN_GT,
    '>=':           TOKEN_GTEQ,
    '<':            TOKEN_LT,
    '<=':           TOKEN_LTEQ,
    '=':            TOKEN_ASSIGN,
    '.':            TOKEN_DOT,
    ':':            TOKEN_COLON,
    '|':            TOKEN_PIPE,
    ',':            TOKEN_COMMA,
    ';':            TOKEN_SEMICOLON
}

reverse_operators = {v: k for k, v in iteritems(operators)}
assert len(operators) == len(reverse_operators), 'operators dropped'
operator_re = re.compile('(%s)' % '|'.join(re.escape(x) for x in
                         sorted(operators, key=lambda x: -len(x))))

ignored_tokens = frozenset([TOKEN_COMMENT_BEGIN, TOKEN_COMMENT,
                            TOKEN_COMMENT_END, TOKEN_WHITESPACE,
                            TOKEN_WHITESPACE, TOKEN_LINECOMMENT_BEGIN,
                            TOKEN_LINECOMMENT_END, TOKEN_LINECOMMENT])
ignore_if_empty = frozenset([TOKEN_WHITESPACE, TOKEN_DATA,
                             TOKEN_COMMENT, TOKEN_LINECOMMENT])


def _describe_token_type(token_type):
    if token_type in reverse_operators:
        return reverse_operators[token_type]
Example No. 38
    def tokeniter(self, source, name, filename=None, state=None):
        """This method tokenizes the text and returns the tokens in a
        generator.  Use this method if you just want to tokenize a template.
        """
        source = text_type(source)
        lines = source.splitlines()
        if self.keep_trailing_newline and source:
            for newline in ('\r\n', '\r', '\n'):
                if source.endswith(newline):
                    lines.append('')
                    break
        source = '\n'.join(lines)
        pos = 0
        lineno = 1
        stack = ['root']
        if state is not None and state != 'root':
            assert state in ('variable', 'block'), 'invalid state'
            stack.append(state + '_begin')
        statetokens = self.rules[stack[-1]]
        source_length = len(source)
        balancing_stack = []
        lstrip_unless_re = self.lstrip_unless_re

        while 1:
            # tokenizer loop
            for regex, tokens, new_state in statetokens:
                m = regex.match(source, pos)
                # if no match we try again with the next rule
                if m is None:
                    continue

                # we only match blocks and variables if braces / parentheses
                # are balanced. continue parsing with the lower rule which
                # is the operator rule. do this only if the end tags look
                # like operators
                if (
                    balancing_stack
                    and tokens in (
                        TOKEN_VARIABLE_END, TOKEN_BLOCK_END, TOKEN_LINESTATEMENT_END
                    )
                ):
                    continue

                # tuples support more options
                if isinstance(tokens, tuple):
                    groups = m.groups()

                    if isinstance(tokens, OptionalLStrip):
                        # Rule supports lstrip. Match will look like
                        # text, block type, whitespace control, type, control, ...
                        text = groups[0]

                        # Skipping the text and first type, every other group is the
                        # whitespace control for each type. One of the groups will be
                        # -, +, or empty string instead of None.
                        strip_sign = next(g for g in groups[2::2] if g is not None)

                        if strip_sign == "-":
                            # Strip all whitespace between the text and the tag.
                            groups = (text.rstrip(),) + groups[1:]
                        elif (
                            # Not marked for preserving whitespace.
                            strip_sign != "+"
                            # lstrip is enabled.
                            and lstrip_unless_re is not None
                            # Not a variable expression.
                            and not m.groupdict().get(TOKEN_VARIABLE_BEGIN)
                        ):
                            # The start of text between the last newline and the tag.
                            l_pos = text.rfind('\n') + 1

                            # If there's only whitespace between the newline and the
                            # tag, strip it.
                            if not lstrip_unless_re.search(text, l_pos):
                                groups = (text[:l_pos],) + groups[1:]

                    for idx, token in enumerate(tokens):
                        # failure group
                        if token.__class__ is Failure:
                            raise token(lineno, filename)
                        # bygroup is a bit more complex, in that case we
                        # yield for the current token the first named
                        # group that matched
                        elif token == '#bygroup':
                            for key, value in iteritems(m.groupdict()):
                                if value is not None:
                                    yield lineno, key, value
                                    lineno += value.count('\n')
                                    break
                            else:
                                raise RuntimeError('%r wanted to resolve '
                                                   'the token dynamically'
                                                   ' but no group matched'
                                                   % regex)
                        # normal group
                        else:
                            data = groups[idx]
                            if data or token not in ignore_if_empty:
                                yield lineno, token, data
                            lineno += data.count('\n')

                # plain strings as tokens are just yielded as-is.
                else:
                    data = m.group()
                    # update brace/parentheses balance
                    if tokens == TOKEN_OPERATOR:
                        if data == '{':
                            balancing_stack.append('}')
                        elif data == '(':
                            balancing_stack.append(')')
                        elif data == '[':
                            balancing_stack.append(']')
                        elif data in ('}', ')', ']'):
                            if not balancing_stack:
                                raise TemplateSyntaxError('unexpected \'%s\'' %
                                                          data, lineno, name,
                                                          filename)
                            expected_op = balancing_stack.pop()
                            if expected_op != data:
                                raise TemplateSyntaxError('unexpected \'%s\', '
                                                          'expected \'%s\'' %
                                                          (data, expected_op),
                                                          lineno, name,
                                                          filename)
                    # yield items
                    if data or tokens not in ignore_if_empty:
                        yield lineno, tokens, data
                    lineno += data.count('\n')

                # fetch the new position into a new variable so that we can
                # check for an internal parsing error which would result in
                # an infinite loop
                pos2 = m.end()

                # handle state changes
                if new_state is not None:
                    # remove the uppermost state
                    if new_state == '#pop':
                        stack.pop()
                    # resolve the new state by group checking
                    elif new_state == '#bygroup':
                        for key, value in iteritems(m.groupdict()):
                            if value is not None:
                                stack.append(key)
                                break
                        else:
                            raise RuntimeError('%r wanted to resolve the '
                                               'new state dynamically but'
                                               ' no group matched' %
                                               regex)
                    # direct state name given
                    else:
                        stack.append(new_state)
                    statetokens = self.rules[stack[-1]]
                # we are still at the same position and no stack change.
                # this means a loop without break condition, avoid that and
                # raise error
                elif pos2 == pos:
                    raise RuntimeError('%r yielded empty string without '
                                       'stack change' % regex)
                # advance to the new position and start matching again
                pos = pos2
                break
            # if the loop terminated without a break we haven't found a single
            # match; either we are at the end of the file or we have a problem
            else:
                # end of text
                if pos >= source_length:
                    return
                # something went wrong
                raise TemplateSyntaxError('unexpected char %r at %d' %
                                          (source[pos], pos), lineno,
                                          name, filename)
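
The rule-stack tokenizer above ('#pop', '#bygroup' and the balancing checks) is normally driven through Jinja2's `Environment.lex()`, which yields `(lineno, token_type, value)` tuples. A minimal usage sketch, assuming Jinja2 with its default delimiters (the sample source string is only illustrative):

from jinja2 import Environment

# Minimal sketch: Environment.lex() runs the state-stack tokenizer shown above
# and yields (lineno, token_type, value) tuples for each lexer match.
env = Environment()
source = "Hello {% if user %}{{ user.name }}{% endif %}!"

for lineno, token_type, value in env.lex(source):
    print(lineno, token_type, repr(value))
# Token types include 'data', 'block_begin', 'name', 'block_end',
# 'variable_begin' and 'variable_end'; crossing a {% ... %} or {{ ... }}
# delimiter is what pushes and pops the lexer states handled above.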
Example no. 39
0
    def template_ast(self, node, frame=None):  # pragma: no cover

        """ Shim for Jinja2's default ``Jinja``-sytnax-to-Python AST converter.
        Wraps template code in a module-level ``run`` function that binds it
        to an instance of :py:class:`jinja2.Environment`.

        :param node: Current AST node.
        :param frame: Current code frame.
        :return: ``None``. """

        assert frame is None, "no root frame allowed"
        eval_ctx = EvalContext(self.environment, self.name)

        from jinja2.runtime import __all__ as exported

        self.writeline("# -*- coding: utf-8 -*-")
        self.writeline("")
        self.writeline("from __future__ import division")
        self.writeline("from jinja2.runtime import " + ", ".join(exported))
        if not unoptimize_before_dead_code:
            self.writeline("dummy = lambda *x: None")

        # if we want a deferred initialization we cannot move the
        # environment into a local name
        envenv = "" if self.defer_init else ", environment=environment"

        # do we have an extends tag at all?  If not, we can save some
        # overhead by just not processing any inheritance code.
        have_extends = node.find(nodes.Extends) is not None

        # find all blocks
        for block in node.find_all(nodes.Block):
            if block.name in self.blocks:
                self.fail("block %r defined twice" % block.name, block.lineno)
            self.blocks[block.name] = block

        # find all imports and import them
        for import_ in node.find_all(nodes.ImportedName):
            if import_.importname not in self.import_aliases:
                imp = import_.importname
                self.import_aliases[imp] = alias = self.temporary_identifier()
                if "." in imp:
                    module, obj = imp.rsplit(".", 1)
                    self.writeline("from %s import %s as %s" % (module, obj, alias))
                else:
                    self.writeline("import %s as %s" % (imp, alias))

        # add the load name
        self.writeline("name = %r" % self.name)

        # generate the deferred init wrapper
        self.writeline("def run(environment):", extra=1)
        self.indent()

        # generate the root render function.
        self.writeline("def root(context%s):" % envenv, extra=1)

        # process the root
        frame = Frame(eval_ctx)
        frame.inspect(node.body)
        frame.toplevel = frame.rootlevel = True
        frame.require_output_check = have_extends and not self.has_known_extends
        self.indent()
        if have_extends:
            self.writeline("parent_template = None")
        if "self" in find_undeclared(node.body, ("self",)):
            frame.identifiers.add_special("self")
            self.writeline("l_self = TemplateReference(context)")
        self.pull_locals(frame)
        self.pull_dependencies(node.body)
        self.blockvisit(node.body, frame)
        self.outdent()

        # make sure that the parent root is called.
        if have_extends:
            if not self.has_known_extends:
                self.indent()
                self.writeline("if parent_template is not None:")
            self.indent()
            self.writeline("for event in parent_template." "root_render_func(context):")
            self.indent()
            self.writeline("yield event")
            self.outdent(2 + (not self.has_known_extends))

        # at this point we now have the blocks collected and can visit them too.
        for name, block in iteritems(self.blocks):
            block_frame = Frame(eval_ctx)
            block_frame.inspect(block.body)
            block_frame.block = name
            self.writeline("def block_%s(context%s):" % (name, envenv), block, 1)
            self.indent()
            undeclared = find_undeclared(block.body, ("self", "super"))
            if "self" in undeclared:
                block_frame.identifiers.add_special("self")
                self.writeline("l_self = TemplateReference(context)")
            if "super" in undeclared:
                block_frame.identifiers.add_special("super")
                self.writeline("l_super = context.super(%r, " "block_%s)" % (name, name))
            self.pull_locals(block_frame)
            self.pull_dependencies(block.body)
            self.blockvisit(block.body, block_frame)
            self.outdent()

        self.writeline("blocks = {%s}" % ", ".join("%r: block_%s" % (x, x) for x in self.blocks), extra=1)

        # add a function that returns the debug info
        self.writeline("debug_info = %r" % "&".join("%s=%s" % x for x in self.debug_info))

        self.writeline("return (root, blocks, debug_info)")
        self.outdent()
        self.writeline("")
Example no. 40
0
def fake_exc_info(exc_info, filename, lineno):
    """Helper for `translate_exception`."""
    exc_type, exc_value, tb = exc_info

    # figure the real context out
    if tb is not None:
        real_locals = tb.tb_frame.f_locals.copy()
        ctx = real_locals.get('context')
        if ctx:
            locals = ctx.get_all()
        else:
            locals = {}
        for name, value in iteritems(real_locals):
            if name.startswith('l_') and value is not missing:
                locals[name[2:]] = value

        # if there is a local called __jinja_exception__, we get
        # rid of it to not break the debug functionality.
        locals.pop('__jinja_exception__', None)
    else:
        locals = {}

    # assemble the fake globals we need
    globals = {
        '__name__':             filename,
        '__file__':             filename,
        '__jinja_exception__':  exc_info[:2],

        # we don't want to keep the reference to the templates around
        # to not cause circular dependencies, but we mark it as Jinja
        # frame for the ProcessedTraceback
        '__jinja_template__':   None
    }

    # and fake the exception
    code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec')

    # if possible, change the name of the code.  This won't work in some
    # Python environments such as Google App Engine
    try:
        if tb is None:
            location = 'templates'
        else:
            function = tb.tb_frame.f_code.co_name
            if function == 'root':
                location = 'top-level templates code'
            elif function.startswith('block_'):
                location = 'block "%s"' % function[6:]
            else:
                location = 'templates'

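        # CodeType takes an extra co_kwonlyargcount argument on Python 3,
        # hence the two call variants below.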
        if PY2:
            code = CodeType(0, code.co_nlocals, code.co_stacksize,
                            code.co_flags, code.co_code, code.co_consts,
                            code.co_names, code.co_varnames, filename,
                            location, code.co_firstlineno,
                            code.co_lnotab, (), ())
        else:
            code = CodeType(0, code.co_kwonlyargcount,
                            code.co_nlocals, code.co_stacksize,
                            code.co_flags, code.co_code, code.co_consts,
                            code.co_names, code.co_varnames, filename,
                            location, code.co_firstlineno,
                            code.co_lnotab, (), ())
    except Exception:
        pass

    # execute the code and catch the new traceback
    try:
        exec(code, globals, locals)
    except:
        exc_info = sys.exc_info()
        new_tb = exc_info[2].tb_next

    # return without this frame
    return exc_info[:2] + (new_tb,)
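
The heart of `fake_exc_info` is the line-padding trick: a `raise` statement is compiled behind `lineno - 1` blank lines so the resulting frame reports the template's filename and line number. A minimal, self-contained sketch of that trick (the helper name and "example.html" are hypothetical and not part of the code above):

import sys

# Minimal sketch of the line-padding trick used by fake_exc_info: compiling a
# raise statement preceded by (lineno - 1) blank lines produces a code object
# whose traceback points at the requested filename and line number.
def fake_frame_exc(filename, lineno, message):
    code = compile("\n" * (lineno - 1) + "raise __fake_exception__",
                   filename, "exec")
    fake_globals = {"__fake_exception__": RuntimeError(message)}
    try:
        exec(code, fake_globals)
    except RuntimeError:
        exc_type, exc_value, tb = sys.exc_info()
        # tb points at the exec() call above; tb.tb_next is the frame whose
        # filename and line number were faked by the padded compile().
        return exc_type, exc_value, tb.tb_next


info = fake_frame_exc("example.html", 7, "boom")
print(info[2].tb_frame.f_code.co_filename, info[2].tb_lineno)  # example.html 7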