def _fix_plugins(contents_text: str, settings: Settings) -> str:
    """Run every applicable plugin fixer over *contents_text* and return the result.

    Returns the input unchanged when it does not parse, when no plugin
    matches, or when it cannot be tokenized.
    """
    try:
        tree = ast_parse(contents_text)
    except SyntaxError:
        return contents_text

    fixers = visit(FUNCS, tree, settings)
    if not fixers:
        return contents_text

    try:
        toks = src_to_tokens(contents_text)
    except tokenize.TokenError:  # pragma: no cover (bpo-2180)
        return contents_text

    _fixup_dedent_tokens(toks)

    for idx, tok in reversed_enumerate(toks):
        if not tok.src:
            continue
        # even though this is a defaultdict, `.get()` avoids creating empty
        # entries and makes this function's self time almost 50% faster
        for fixer in fixers.get(tok.offset, ()):
            fixer(idx, toks)

    return tokens_to_src(toks)
def _imports_future(contents_text: str, future_name: str) -> bool:
    """Return whether the module imports *future_name* from ``__future__``.

    Only the statements that may legally precede a ``__future__`` import
    (the docstring and other ``__future__`` imports) are scanned past;
    any other statement ends the search.
    """
    try:
        tree = ast_parse(contents_text)
    except SyntaxError:
        return False

    for stmt in tree.body:
        # a leading string expression (the module docstring) may precede imports
        if isinstance(stmt, ast.Expr) and isinstance(stmt.value, ast.Str):
            continue
        if not isinstance(stmt, ast.ImportFrom):
            # __future__ imports must come first -- stop at anything else
            return False
        if stmt.module != '__future__':
            return False
        if (
                stmt.level == 0 and
                any(alias.name == future_name for alias in stmt.names)
        ):
            return True
        # a different __future__ import: keep scanning
    return False
def _fix_py36_plus(contents_text: str) -> str:
    """Rewrite py36+ constructs: ``.format(...)`` calls into f-strings, and
    functional ``NamedTuple`` / ``TypedDict`` declarations into class syntax.

    Returns the input unchanged when it does not parse, when nothing was
    found to rewrite, or when it cannot be tokenized.
    """
    try:
        ast_obj = ast_parse(contents_text)
    except SyntaxError:
        return contents_text

    visitor = FindPy36Plus()
    visitor.visit(ast_obj)

    # bail early when the visitor recorded nothing rewritable
    if not any((
            visitor.fstrings,
            visitor.named_tuples,
            visitor.dict_typed_dicts,
            visitor.kw_typed_dicts,
    )):
        return contents_text

    try:
        tokens = src_to_tokens(contents_text)
    except tokenize.TokenError:  # pragma: no cover (bpo-2180)
        return contents_text
    # iterate in reverse so in-place token edits don't shift the offsets of
    # not-yet-processed (earlier) tokens
    for i, token in reversed_enumerate(tokens):
        if token.offset in visitor.fstrings:
            # TODO: handle \N escape sequences
            if r'\N' in token.src:
                continue

            # expect the literal to be followed by exactly `.` `format` `(`
            paren = i + 3
            if tokens_to_src(tokens[i + 1:paren + 1]) != '.format(':
                continue

            args, end = parse_call_args(tokens, paren)
            # if it spans more than one line, bail
            if tokens[end - 1].line != token.line:
                continue

            # any backslash or quote inside the arguments could conflict with
            # the string's own quoting once inlined into an f-string -- bail
            args_src = tokens_to_src(tokens[paren:end])
            if '\\' in args_src or '"' in args_src or "'" in args_src:
                continue

            # replace the literal with the f-string and drop the whole
            # `.format(...)` call that followed it
            tokens[i] = token._replace(
                src=_to_fstring(token.src, tokens, args),
            )
            del tokens[i + 1:end]
        elif token.offset in visitor.named_tuples and token.name == 'NAME':
            # NamedTuple('N', [('attr', type), ...]) -> class N(NamedTuple): ...
            call = visitor.named_tuples[token.offset]
            types: Dict[str, ast.expr] = {
                tup.elts[0].s: tup.elts[1]  # type: ignore # (checked above)
                for tup in call.args[1].elts  # type: ignore # (checked above)
            }
            end, attrs = _typed_class_replacement(tokens, i, call, types)
            src = f'class {tokens[i].src}({_unparse(call.func)}):\n{attrs}'
            tokens[i:end] = [Token('CODE', src)]
        elif token.offset in visitor.kw_typed_dicts and token.name == 'NAME':
            # TypedDict('N', attr=type, ...) -> class N(TypedDict): ...
            call = visitor.kw_typed_dicts[token.offset]
            types = {
                arg.arg: arg.value  # type: ignore # (checked above)
                for arg in call.keywords
            }
            end, attrs = _typed_class_replacement(tokens, i, call, types)
            src = f'class {tokens[i].src}({_unparse(call.func)}):\n{attrs}'
            tokens[i:end] = [Token('CODE', src)]
        elif token.offset in visitor.dict_typed_dicts and token.name == 'NAME':
            # TypedDict('N', {'attr': type, ...}) -> class N(TypedDict): ...
            call = visitor.dict_typed_dicts[token.offset]
            types = {
                k.s: v  # type: ignore # (checked above)
                for k, v in zip(
                    call.args[1].keys,  # type: ignore # (checked above)
                    call.args[1].values,  # type: ignore # (checked above)
                )
            }
            if call.keywords:
                # carry a `total=...` keyword through to the class bases
                total = call.keywords[0].value.value  # type: ignore # (checked above)  # noqa: E501
                end, attrs = _typed_class_replacement(tokens, i, call, types)
                src = (
                    f'class {tokens[i].src}('
                    f'{_unparse(call.func)}, total={total}'
                    f'):\n'
                    f'{attrs}'
                )
                tokens[i:end] = [Token('CODE', src)]
            else:
                end, attrs = _typed_class_replacement(tokens, i, call, types)
                src = f'class {tokens[i].src}({_unparse(call.func)}):\n{attrs}'
                tokens[i:end] = [Token('CODE', src)]
    return tokens_to_src(tokens)
def _fix_py36_plus(contents_text: str) -> str:
    """Rewrite py36+ constructs: ``.format(...)`` calls into f-strings, and
    functional ``NamedTuple`` / ``TypedDict`` declarations into class syntax.

    Returns the input unchanged when it does not parse, when nothing was
    found to rewrite, or when it cannot be tokenized.
    """
    try:
        tree = ast_parse(contents_text)
    except SyntaxError:
        return contents_text

    finder = FindPy36Plus()
    finder.visit(tree)

    # nothing recorded by the visitor -> nothing to do
    if not any((
            finder.fstrings,
            finder.named_tuples,
            finder.dict_typed_dicts,
            finder.kw_typed_dicts,
    )):
        return contents_text

    try:
        toks = src_to_tokens(contents_text)
    except tokenize.TokenError:  # pragma: no cover (bpo-2180)
        return contents_text

    # walk backwards so in-place edits never invalidate earlier offsets
    for idx, tok in reversed_enumerate(toks):
        is_name = tok.name == 'NAME'
        if tok.offset in finder.fstrings:
            node = finder.fstrings[tok.offset]

            # TODO: handle \N escape sequences
            if r'\N' in tok.src:
                continue

            # the literal must be followed by exactly `.` `format` `(`
            paren = idx + 3
            if tokens_to_src(toks[idx + 1:paren + 1]) != '.format(':
                continue

            # we don't actually care about arg position, so we pass `node`
            fmt_victims = victims(toks, paren, node, gen=False)
            end = fmt_victims.ends[-1]
            # bail when the call spans more than one line
            if toks[end].line != tok.line:
                continue

            toks[idx] = tok._replace(src=_to_fstring(tok.src, node))
            del toks[idx + 1:end + 1]
        elif is_name and tok.offset in finder.named_tuples:
            # NamedTuple('N', [('attr', type), ...]) -> class syntax
            call = finder.named_tuples[tok.offset]
            types: Dict[str, ast.expr] = {
                pair.elts[0].s: pair.elts[1]  # type: ignore # (checked above)
                for pair in call.args[1].elts  # type: ignore # (checked above)
            }
            _replace_typed_class(toks, idx, call, types)
        elif is_name and tok.offset in finder.kw_typed_dicts:
            # TypedDict('N', attr=type, ...) -> class syntax
            call = finder.kw_typed_dicts[tok.offset]
            types = {
                kw.arg: kw.value  # type: ignore # (checked above)
                for kw in call.keywords
            }
            _replace_typed_class(toks, idx, call, types)
        elif is_name and tok.offset in finder.dict_typed_dicts:
            # TypedDict('N', {'attr': type, ...}) -> class syntax
            call = finder.dict_typed_dicts[tok.offset]
            types = {
                key.s: value  # type: ignore # (checked above)
                for key, value in zip(
                    call.args[1].keys,  # type: ignore # (checked above)
                    call.args[1].values,  # type: ignore # (checked above)
                )
            }
            _replace_typed_class(toks, idx, call, types)

    return tokens_to_src(toks)