def _make_fstring(tokens):
    import tokenize_rt

    new_tokens = []
    exprs = []

    for i, token in enumerate(tokens):
        if token.name == 'STRING' and _is_f(token):
            prefix, s = tokenize_rt.parse_string_literal(token.src)
            parts = []
            try:
                _fstring_parse_outer(s, 0, 0, parts, exprs)
            except SyntaxError as e:
                raise TokenSyntaxError(e, tokens[i - 1])
            if 'r' in prefix.lower():
                # the raw prefix is dropped by the rewrite, so re-escape
                # backslashes to preserve the original string's value
                parts = [part.replace('\\', '\\\\') for part in parts]
            token = token._replace(src=''.join(parts))
        elif token.name == 'STRING':
            new_src = token.src.replace('{', '{{').replace('}', '}}')
            token = token._replace(src=new_src)
        new_tokens.append(token)

    exprs = ('({})'.format(expr) for expr in exprs)
    format_src = '.format({})'.format(', '.join(exprs))
    new_tokens.append(tokenize_rt.Token('FORMAT', src=format_src))

    return new_tokens
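
A minimal sketch (not from the project) of the tokenize_rt primitives the helper above leans on, plus the shape of rewrite it is built to emit; the before/after strings are illustrative, not captured output:

import tokenize_rt

# parse_string_literal() splits a literal's prefix from its quoted body;
# on the old interpreters this targets, an f-string is just a STRING
# token whose prefix contains 'f'.
prefix, body = tokenize_rt.parse_string_literal("f'{greeting}, {name}!'")
print(prefix, body)   # f '{greeting}, {name}!'

# src_to_tokens()/tokens_to_src() round-trip source exactly, so per-token
# edits like the ones above join back into working source text.
src = "x = 'hello'\n"
assert tokenize_rt.tokens_to_src(tokenize_rt.src_to_tokens(src)) == src

# Illustrative before/after for the rewrite: each interpolated expression
# is parenthesized and handed to .format().
before = "f'{greeting}, {name}!'"
after = "'{}, {}!'.format((greeting), (name))"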
Example #2
def _make_fstring(tokens):
    import tokenize_rt

    new_tokens = []
    exprs = []

    for i, token in enumerate(tokens):
        if fstr(token):  # project-local predicate: is this token an f-string?
            parts = []
            try:
                # token.src[1:] drops the leading 'f' prefix before parsing
                _fstring_parse_outer(token.src[1:], 0, 0, parts, exprs)
            except SyntaxError as e:
                raise TokenSyntaxError(e, tokens[i])
            token = token._replace(src="".join(parts))

        new_tokens.append(token)

    # () required for cases like f'{1, 2}' => '{}'.format( (1, 2) )
    exprs = ("({})".format(expr) for expr in exprs)

    format_src = ".format({})".format(", ".join(exprs))
    new_tokens.append(tokenize_rt.Token("FORMAT", src=format_src))
    return new_tokens
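
A quick check (outside the project) of why those parentheses matter for the f'{1, 2}' case mentioned in the comment above: without them the tuple's elements would reach .format() as two separate arguments.

print('{}'.format((1, 2)))   # (1, 2)  -- matches what f'{1, 2}' evaluates to
print('{}'.format(1, 2))     # 1       -- wrong: str.format ignores the extra argument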
def decode(b: bytes, errors: str = 'strict') -> Tuple[str, int]:
    u, length = utf_8.decode(b, errors)

    # replace encoding cookie so there isn't a recursion problem
    lines = u.splitlines(True)
    for idx in (0, 1):
        if idx >= len(lines):
            break
        lines[idx] = tokenize.cookie_re.sub(_new_coding_cookie, lines[idx])
    u = ''.join(lines)

    visitor = Visitor()
    visitor.visit(_ast_parse(u))

    tokens = tokenize_rt.src_to_tokens(u)
    for i, token in tokenize_rt.reversed_enumerate(tokens):
        if token.offset in visitor.offsets:
            # look forward for the `:`, `,`, `=`, `)` or newline that ends the expression
            depth = 0
            j = i + 1
            while depth or tokens[j].src not in {':', ',', '=', ')', '\n'}:
                if tokens[j].src in {'(', '{', '['}:
                    depth += 1
                elif tokens[j].src in {')', '}', ']'}:
                    depth -= 1
                j += 1
            j -= 1

            # look backward to delete whitespace / comments / etc.
            while tokens[j].name in tokenize_rt.NON_CODING_TOKENS:
                j -= 1

            quoted = repr(tokenize_rt.tokens_to_src(tokens[i:j + 1]))
            tokens[i:j + 1] = [tokenize_rt.Token('STRING', quoted)]

    return tokenize_rt.tokens_to_src(tokens), length
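
A self-contained sketch of the scan-and-quote idea above (assumptions: tokenize_rt is installed, and the offset lookup is faked by matching on the name `List` instead of using the ast Visitor the codec relies on): walk forward to the delimiter that closes the expression at depth 0, trim trailing non-coding tokens, and replace the slice with a single quoted STRING token.

import tokenize_rt

src = "def f(x: List[int] = []) -> None: pass\n"
tokens = tokenize_rt.src_to_tokens(src)

for i, token in tokenize_rt.reversed_enumerate(tokens):
    if token.src == 'List':  # stand-in for `token.offset in visitor.offsets`
        depth = 0
        j = i + 1
        while depth or tokens[j].src not in {':', ',', '=', ')', '\n'}:
            if tokens[j].src in {'(', '{', '['}:
                depth += 1
            elif tokens[j].src in {')', '}', ']'}:
                depth -= 1
            j += 1
        j -= 1
        while tokens[j].name in tokenize_rt.NON_CODING_TOKENS:
            j -= 1
        quoted = repr(tokenize_rt.tokens_to_src(tokens[i:j + 1]))
        tokens[i:j + 1] = [tokenize_rt.Token('STRING', quoted)]

print(tokenize_rt.tokens_to_src(tokens))
# def f(x: 'List[int]' = []) -> None: pass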
Example #4
def _make_fstring(src):
    import tokenize_rt

    return [tokenize_rt.Token(name='FSTRING', src=_fstring_reformat(src))]