Example #1: rewrite an Optional[...] annotation to ... | None
def _fix_optional(i: int, tokens: List[Token]) -> None:
    j = find_token(tokens, i, '[')
    k = find_closing_bracket(tokens, j)
    if tokens[j].line == tokens[k].line:
        tokens[k] = Token('CODE', ' | None')
        del tokens[i:j + 1]
    else:
        tokens[j] = tokens[j]._replace(src='(')
        tokens[k] = tokens[k]._replace(src=')')
        tokens[i:j] = [Token('CODE', 'None | ')]
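The fixers in these examples operate on a tokenize-rt token stream. Below is a minimal sketch of driving _fix_optional, assuming it is pasted next to these stand-ins; find_token and find_closing_bracket here are simplified guesses at the helpers the example relies on (not their real implementations), and the typing import is only there so the annotated function above can sit in the same module:

from typing import List

from tokenize_rt import Token, src_to_tokens, tokens_to_src


def find_token(tokens: List[Token], i: int, src: str) -> int:
    # stand-in: index of the first token at or after i whose source is `src`
    while tokens[i].src != src:
        i += 1
    return i


def find_closing_bracket(tokens: List[Token], j: int) -> int:
    # stand-in: index of the bracket that closes the one at j
    depth, k = 1, j + 1
    while depth:
        if tokens[k].src in {'(', '[', '{'}:
            depth += 1
        elif tokens[k].src in {')', ']', '}'}:
            depth -= 1
        k += 1
    return k - 1


tokens = src_to_tokens('x: Optional[int] = None\n')
i = next(idx for idx, tok in enumerate(tokens) if tok.src == 'Optional')
_fix_optional(i, tokens)
print(tokens_to_src(tokens))  # x: int | None = None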
Example #2: rewrite import mock / import mock.mock to from unittest import mock
def _fix_import_mock(i: int, tokens: List[Token]) -> None:
    j = find_token(tokens, i, 'mock')
    if (
            j + 2 < len(tokens) and
            tokens[j + 1].src == '.' and
            tokens[j + 2].src == 'mock'
    ):
        j += 2
    src = 'from unittest import mock'
    tokens[i:j + 1] = [tokens[j]._replace(name='NAME', src=src)]
Example #3: rewrite the mock / mock.mock module name in a from-import to unittest.mock
def _fix_import_from_mock(i: int, tokens: List[Token]) -> None:
    j = find_token(tokens, i, 'mock')
    if (
            j + 2 < len(tokens) and
            tokens[j + 1].src == '.' and
            tokens[j + 2].src == 'mock'
    ):
        k = j + 2
    else:
        k = j
    src = 'unittest.mock'
    tokens[j:k + 1] = [tokens[j]._replace(name='NAME', src=src)]
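The two import fixers above can be driven the same way. A sketch assuming _fix_import_mock and _fix_import_from_mock plus the find_token stand-in and tokenize-rt imports from Example #1 are in scope; entering at the from/import keyword is an assumption about the callers, which are not shown here:

tokens = src_to_tokens('import mock\n')
i = next(idx for idx, tok in enumerate(tokens) if tok.src == 'import')
_fix_import_mock(i, tokens)
print(tokens_to_src(tokens))  # from unittest import mock

tokens = src_to_tokens('from mock import patch\n')
i = next(idx for idx, tok in enumerate(tokens) if tok.src == 'from')
_fix_import_from_mock(i, tokens)
print(tokens_to_src(tokens))  # from unittest.mock import patch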
Example #4: rewrite Union[...] to pipe syntax, locating the arguments via the first argument's AST offset
def _fix_union(
        i: int,
        tokens: List[Token],
        *,
        arg: ast.expr,
        arg_count: int,
) -> None:
    arg_offset = ast_to_offset(arg)
    j = find_token(tokens, i, '[')
    to_delete = []
    commas: List[int] = []

    arg_depth = -1  # bracket depth at which the Union's arguments sit; set when the first argument is reached
    depth = 1
    k = j + 1
    while depth:
        if tokens[k].src in OPENING:
            if arg_depth == -1:
                # brackets opened before the first argument are redundant
                # parens around it: mark them for deletion
                to_delete.append(k)
            depth += 1
        elif tokens[k].src in CLOSING:
            depth -= 1
            if 0 < depth < arg_depth:
                # ...and so are their matching closing brackets
                to_delete.append(k)
        elif tokens[k].offset == arg_offset:
            arg_depth = depth
        elif depth == arg_depth and tokens[k].src == ',':
            # commas between the arguments become pipes; any surplus
            # (trailing) comma is deleted instead
            if len(commas) >= arg_count - 1:
                to_delete.append(k)
            else:
                commas.append(k)

        k += 1
    k -= 1

    if tokens[j].line == tokens[k].line:
        del tokens[k]
        for comma in commas:
            tokens[comma] = Token('CODE', ' |')
        for paren in reversed(to_delete):
            del tokens[paren]
        del tokens[i:j + 1]
    else:
        tokens[j] = tokens[j]._replace(src='(')
        tokens[k] = tokens[k]._replace(src=')')

        for comma in commas:
            tokens[comma] = Token('CODE', ' |')
        for paren in reversed(to_delete):
            del tokens[paren]
        del tokens[i:j]
Example #5: rewrite Union[...] to pipe syntax, tracking paren and comma depth instead of AST offsets
def _fix_union(
    i: int,
    tokens: List[Token],
    *,
    arg_count: int,
) -> None:
    depth = 1
    parens_done = []
    open_parens = []
    commas = []
    coding_depth = None

    j = find_token(tokens, i, '[')
    k = j + 1
    while depth:
        # it's possible our first coding token is a close paren
        # so make sure this is separate from the if chain below
        if (tokens[k].name not in NON_CODING_TOKENS and tokens[k].src != '('
                and coding_depth is None):
            if tokens[k].src == ')':  # the coding token was an empty tuple
                coding_depth = depth - 1
            else:
                coding_depth = depth

        if tokens[k].src in OPENING:
            if tokens[k].src == '(':
                open_parens.append((depth, k))

            depth += 1
        elif tokens[k].src in CLOSING:
            if tokens[k].src == ')':
                paren_depth, open_paren = open_parens.pop()
                parens_done.append((paren_depth, (open_paren, k)))

            depth -= 1
        elif tokens[k].src == ',':
            commas.append((depth, k))

        k += 1
    k -= 1

    assert coding_depth is not None
    assert not open_parens, open_parens
    comma_depth = min((depth for depth, _ in commas), default=sys.maxsize)
    min_depth = min(comma_depth, coding_depth)

    to_delete = [
        paren for depth, positions in parens_done if depth < min_depth
        for paren in positions
    ]

    if comma_depth <= coding_depth:
        comma_positions = [k for depth, k in commas if depth == comma_depth]
        if len(comma_positions) == arg_count:
            to_delete.append(comma_positions.pop())
    else:
        comma_positions = []

    to_delete.sort()

    if tokens[j].line == tokens[k].line:
        del tokens[k]
        for comma in comma_positions:
            tokens[comma] = Token('CODE', ' |')
        for paren in reversed(to_delete):
            del tokens[paren]
        del tokens[i:j + 1]
    else:
        tokens[j] = tokens[j]._replace(src='(')
        tokens[k] = tokens[k]._replace(src=')')

        for comma in comma_positions:
            tokens[comma] = Token('CODE', ' |')
        for paren in reversed(to_delete):
            del tokens[paren]
        del tokens[i:j]
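A sketch of driving this second _fix_union, assuming it is pasted into a module with the stand-ins below. OPENING, CLOSING and find_token are guesses reconstructed from how the code above uses them; NON_CODING_TOKENS is the real tokenize-rt constant:

import sys
from typing import List

from tokenize_rt import NON_CODING_TOKENS, Token, src_to_tokens, tokens_to_src

OPENING = frozenset('([{')  # assumed definitions for the bracket sets used above
CLOSING = frozenset(')]}')


def find_token(tokens: List[Token], i: int, src: str) -> int:
    # stand-in: index of the first token at or after i whose source is `src`
    while tokens[i].src != src:
        i += 1
    return i


tokens = src_to_tokens('x: Union[int, str] = None\n')
i = next(idx for idx, tok in enumerate(tokens) if tok.src == 'Union')
_fix_union(i, tokens, arg_count=2)
print(tokens_to_src(tokens))  # x: int | str = None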
Example #6: delete a call's parentheses and everything inside them
def _remove_call(i: int, tokens: List[Token]) -> None:
    i = find_open_paren(tokens, i)
    j = find_token(tokens, i, ')')
    del tokens[i:j + 1]
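_remove_call additionally needs find_open_paren; the stand-in below assumes it simply means the next '(' at or after i, which is a guess at its contract. Reusing find_token and the tokenize-rt imports from Example #1, with a purely illustrative decorator as input:

def find_open_paren(tokens, i):
    # stand-in: assumed to return the index of the next '(' at or after i
    return find_token(tokens, i, '(')


tokens = src_to_tokens('@functools.lru_cache()\ndef f():\n    pass\n')
i = next(idx for idx, tok in enumerate(tokens) if tok.src == 'lru_cache')
_remove_call(i, tokens)
print(tokens_to_src(tokens))  # first line becomes: @functools.lru_cache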
Example #7: collapse a for-loop that yields each item into a single yield from statement
def _fix_yield(i: int, tokens: List[Token]) -> None:
    in_token = find_token(tokens, i, 'in')
    colon = find_block_start(tokens, i)
    block = Block.find(tokens, i, trim_end=True)
    container = tokens_to_src(tokens[in_token + 1:colon]).strip()
    tokens[i:block.end] = [Token('CODE', f'yield from {container}\n')]
Example #8: collapse a mock.mock attribute reference to mock
def _fix_mock_mock(i: int, tokens: List[Token]) -> None:
    j = find_token(tokens, i + 1, 'mock')
    del tokens[i + 1:j + 1]
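_fix_mock_mock drops the doubled attribute segment. A sketch with the same assumed find_token stand-in and tokenize-rt imports from Example #1; the source being rewritten is only an illustration:

tokens = src_to_tokens('mock.mock.patch("a.b")\n')
i = next(idx for idx, tok in enumerate(tokens) if tok.src == 'mock')
_fix_mock_mock(i, tokens)
print(tokens_to_src(tokens))  # mock.patch("a.b")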
Example #9: replace the next comma with a pipe
def _replace_comma_with_pipe(i: int, tokens: List[Token]) -> None:
    comma = find_token(tokens, i, ',')
    tokens[comma] = Token('CODE', ' |')