Example #1
def _fix_extraneous_parens(tokens: List[Token], i: int) -> None:
    # skip forward to the next coding token
    i += 1
    while tokens[i].name in NON_CODING_TOKENS:
        i += 1
    # if we did not find another brace, return immediately
    if tokens[i].src != '(':
        return

    start = i
    depth = 1
    while depth:
        i += 1
        # found comma or yield at depth 1: this is a tuple / coroutine
        if depth == 1 and tokens[i].src in {',', 'yield'}:
            return
        elif tokens[i].src in OPENING:
            depth += 1
        elif tokens[i].src in CLOSING:
            depth -= 1
    end = i

    # empty tuple
    if all(t.name in NON_CODING_TOKENS for t in tokens[start + 1:i]):
        return

    # skip forward to the next coding token after the inner close paren
    i += 1
    while tokens[i].name in NON_CODING_TOKENS:
        i += 1

    if tokens[i].src == ')':
        remove_brace(tokens, end)
        remove_brace(tokens, start)
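This first fixer removes a redundant inner pair of parentheses, e.g. turning print(('hello world')) into print('hello world'); it is called with the index of an opening parenthesis and bails out if the nested parentheses actually delimit a tuple, a yield expression, or nothing at all. A minimal sketch of the token stream it walks, assuming Token and NON_CODING_TOKENS come from the tokenize-rt package (OPENING, CLOSING and remove_brace are helpers from the surrounding module and are not shown):

# Illustrative only: dump the token stream such a fixer operates on.
# Requires tokenize-rt; the fixer itself also needs the module's own helpers.
from tokenize_rt import src_to_tokens

for tok in src_to_tokens("print(('hello world'))\n"):
    print(tok.name, repr(tok.src))

# Running the fixer on the index of the outer '(' would drop the inner pair:
#     print(('hello world'))  ->  print('hello world')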
Example #2
def _fix_set_literal(i: int, tokens: List[Token], *, arg: ast.expr) -> None:
    # TODO: this could be implemented with a little extra logic
    if not immediately_paren('set', tokens, i):
        return

    gen = isinstance(arg, ast.GeneratorExp)
    set_victims = victims(tokens, i + 1, arg, gen=gen)

    del set_victims.starts[0]
    end_index = set_victims.ends.pop()

    tokens[end_index] = Token('OP', '}')
    for index in reversed(set_victims.starts + set_victims.ends):
        remove_brace(tokens, index)
    tokens[i:i + 2] = [Token('OP', '{')]
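The set-literal fixer rewrites set(...) calls over a literal or a generator expression into a set literal / set comprehension: victims collects the brace positions to delete, the closing paren becomes '}', and the name set plus its opening paren collapse into a single '{'. The pairs below are only a sketch of the intended rewrite; how i and arg are obtained from the AST is the caller's job and is not shown here:

import ast

# Illustrative before/after pairs for this kind of rewrite (not an API call):
cases = [
    ("set([1, 2, 3])", "{1, 2, 3}"),
    ("set((1, 2, 3))", "{1, 2, 3}"),
    ("set(x for x in y)", "{x for x in y}"),
]
for before, after in cases:
    ast.parse(before)  # both sides are valid syntax; only the surface form changes
    ast.parse(after)
    print(f"{before:<22} ->  {after}")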
Example #3
def _fix_dict_comp(
    i: int,
    tokens: List[Token],
    arg: Union[ast.ListComp, ast.GeneratorExp],
) -> None:
    if not immediately_paren('dict', tokens, i):
        return

    dict_victims = victims(tokens, i + 1, arg, gen=True)
    elt_victims = victims(tokens, dict_victims.arg_index, arg.elt, gen=True)

    del dict_victims.starts[0]
    end_index = dict_victims.ends.pop()

    tokens[end_index] = Token('OP', '}')
    for index in reversed(dict_victims.ends):
        remove_brace(tokens, index)
    # See #6, Fix SyntaxError from rewriting dict((a, b)for a, b in y)
    if tokens[elt_victims.ends[-1] + 1].src == 'for':
        tokens.insert(elt_victims.ends[-1] + 1, Token(UNIMPORTANT_WS, ' '))
    for index in reversed(elt_victims.ends):
        remove_brace(tokens, index)
    assert elt_victims.first_comma_index is not None
    tokens[elt_victims.first_comma_index] = Token('OP', ':')
    for index in reversed(dict_victims.starts + elt_victims.starts):
        remove_brace(tokens, index)
    tokens[i:i + 2] = [Token('OP', '{')]
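The dict-comprehension fixer works the same way but must also split the (key, value) element tuple: its first comma becomes the ':' of the resulting comprehension, and the issue-#6 comment covers the case where removing the tuple's parentheses would glue the value directly onto the following for keyword. A sketch of the intended rewrites:

import ast

# Illustrative before/after pairs (the second one is the spacing case
# referenced by the '#6' comment above):
cases = [
    ("dict((a, b) for a, b in y)", "{a: b for a, b in y}"),
    ("dict((a, b)for a, b in y)", "{a: b for a, b in y}"),
    ("dict([(a, b) for a, b in y])", "{a: b for a, b in y}"),
]
for before, after in cases:
    ast.parse(before)
    ast.parse(after)
    print(f"{before:<30} ->  {after}")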
Example #4
def _fix_percent_format_tuple(
    i: int,
    tokens: List[Token],
    *,
    node_right: ast.Tuple,
) -> None:
    # TODO: handle \N escape sequences
    if r'\N' in tokens[i].src:
        return

    # TODO: this is overly timid
    paren = i + 4
    if tokens_to_src(tokens[i + 1:paren + 1]) != ' % (':
        return

    fmt_victims = victims(tokens, paren, node_right, gen=False)
    fmt_victims.ends.pop()

    for index in reversed(fmt_victims.starts + fmt_victims.ends):
        remove_brace(tokens, index)

    newsrc = _percent_to_format(tokens[i].src)
    tokens[i] = tokens[i]._replace(src=newsrc)
    tokens[i + 1:paren] = [Token('Format', '.format'), Token('OP', '(')]
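Finally, the percent-format fixer converts old-style % formatting with a tuple right-hand side into an equivalent .format() call: _percent_to_format rewrites the format string itself, and the ' % (' tokens are replaced with '.format('. The pairs below sketch the targeted transformation only; per the two TODOs above, strings containing \N escapes or call sites not spelled exactly ' % (' are left untouched:

# Illustrative before/after pairs for this rewrite:
cases = [
    ("'%s %s' % (a, b)", "'{} {}'.format(a, b)"),
    ("'%s: %s' % (k, v)", "'{}: {}'.format(k, v)"),
]
for before, after in cases:
    print(f"{before:<22} ->  {after}")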