def test_fix_code_should_ignore_duplicate_key_with_comments(self):
    """We only handle simple cases."""
    code = """\
a = {
  (0,1)
  # : f
  : 1,
  (0, 1): 'two',
  (0,1): 3,
}
print(a)
"""
    self.assertEqual(
        code,
        ''.join(autoflake.fix_code(code, remove_duplicate_keys=True)))

    code = """\
{
    1: {0,
    },
    1: #{2,
    #},
    0
}
"""
    self.assertEqual(
        code,
        ''.join(autoflake.fix_code(code, remove_duplicate_keys=True)))
def test_fix_code_with_from_with_and_without_remove_all(self):
    code = """\
from x import a as b, c as d
"""
    self.assertEqual(
        """\
""",
        autoflake.fix_code(code, remove_all_unused_imports=True))

    self.assertEqual(
        code,
        autoflake.fix_code(code, remove_all_unused_imports=False))
def run(self, filename, file,
        remove_all_unused_imports: bool = False,
        remove_unused_variables: bool = True):
    """
    Detects unused code. By default this functionality is limited to:

    - Unneeded pass statements.
    - Unneeded builtin imports.

    :param remove_all_unused_imports:
        True removes all unused imports - might have side effects
    :param remove_unused_variables:
        True removes unused variables - might have side effects
    """
    corrected = autoflake.fix_code(
        ''.join(file),
        additional_imports=None,
        remove_all_unused_imports=remove_all_unused_imports,
        remove_unused_variables=remove_unused_variables).splitlines(True)

    for diff in Diff.from_string_arrays(file, corrected).split_diff():
        yield Result(self,
                     'This file contains unused source code.',
                     affected_code=(diff.range(filename), ),
                     diffs={filename: diff})
def create_schemas(items):
    """Create and write schemas from definitions."""
    schema_classes, schema_names = create_schemas_classes(items)
    items_py = '\n'.join(chain([ITEMS_IMPORTS], schema_classes)).strip()
    items_py = fix_lines(
        fix_code(items_py.decode('utf-8')).splitlines(), Options)
    return items_py, schema_names
def run(self, filename, file,
        remove_all_unused_imports: bool=False,
        remove_unused_variables: bool=True):
    """
    Detects unused code. By default this functionality is limited to:

    - Unneeded pass statements.
    - Unneeded builtin imports.

    :param remove_all_unused_imports:
        True removes all unused imports - might have side effects
    :param remove_unused_variables:
        True removes unused variables - might have side effects
    """
    corrected = autoflake.fix_code(
        ''.join(file),
        additional_imports=None,
        remove_all_unused_imports=remove_all_unused_imports,
        remove_unused_variables=remove_unused_variables
    ).splitlines(True)

    for diff in Diff.from_string_arrays(file, corrected).split_diff():
        yield Result(self,
                     'This file contains unused source code.',
                     affected_code=(diff.range(filename),),
                     diffs={filename: diff})
def render_template(
    self, template_name: str, context: Dict[str, Any], reformat_code: bool = True
):
    log.info("Render template %r with context %s", template_name, context)
    code = self.env.get_template(template_name).render(context)
    if reformat_code:
        code = autoflake.fix_code(
            code,
            additional_imports=None,
            expand_star_imports=True,
            remove_all_unused_imports=True,
            remove_duplicate_keys=True,
            remove_unused_variables=False,
            ignore_init_module_imports=False,
        )
        code = isort.SortImports(file_contents=code).output
        try:
            code = black.format_file_contents(
                code,
                fast=True,
                mode=black.FileMode(
                    target_versions={black.TargetVersion.PY37}, line_length=99
                ),
            )
        except black.NothingChanged:
            pass
    return code
def generate_code(service_name: str, doc: bool = False) -> str:
    model = load_service(service_name)
    output = io.StringIO()
    generate_service_types(output, model, doc=doc)
    generate_service_api(output, model, doc=doc)

    code = output.getvalue()

    try:
        import autoflake
        import isort
        from black import FileMode, format_str

        # try to format with black
        code = format_str(code, mode=FileMode(line_length=100))

        # try to remove unused imports
        code = autoflake.fix_code(code, remove_all_unused_imports=True)

        # try to sort imports
        code = isort.code(code, config=isort.Config(profile="black", line_length=100))
    except Exception:
        pass

    return code
def create_schemas(items):
    """Create and write schemas from definitions."""
    schema_classes, schema_names = create_schemas_classes(items)
    items_py = '\n'.join(chain([ITEMS_IMPORTS], schema_classes)).strip()
    items_py = fix_lines(fix_code(items_py.decode('utf-8')).splitlines(), Options)
    return items_py, schema_names
def test_fix_code_with_unused_variables_should_skip_multiple(self):
    code = """\
def main():
    (x, y, z) = (1, 2, 3)
    print(z)
"""
    self.assertEqual(
        code,
        autoflake.fix_code(code, remove_unused_variables=True))
def test_fix_code_with_from_and_as_and_escaped_newline(self):
    """Make sure stuff after escaped newline is not lost."""
    result = autoflake.fix_code("""\
from collections import defaultdict, namedtuple \\
    as xyz
xyz
""")
    # We currently leave lines with escaped newlines as is. But in the
    # future we may parse them and remove the unused import accordingly.
    # For now, we'll work around it here.
    result = re.sub(r' *\\\n *as ', ' as ', result)

    self.assertEqual(
        """\
from collections import namedtuple as xyz
xyz
""",
        autoflake.fix_code(result))
def test_fix_code_with_from_and_depth_module(self):
    self.assertEqual(
        """\
from distutils.version import StrictVersion
StrictVersion('1.0.0')
""",
        autoflake.fix_code("""\
from distutils.version import LooseVersion, StrictVersion
StrictVersion('1.0.0')
"""))

    self.assertEqual(
        """\
from distutils.version import StrictVersion as version
version('1.0.0')
""",
        autoflake.fix_code("""\
from distutils.version import LooseVersion, StrictVersion as version
version('1.0.0')
"""))
def shed(
    *, source_code: str, first_party_imports: FrozenSet[str] = frozenset()) -> str:
    """Process the source code of a single module."""
    assert isinstance(source_code, str)
    assert isinstance(first_party_imports, frozenset)
    assert all(isinstance(name, str) for name in first_party_imports)
    assert all(name.isidentifier() for name in first_party_imports)

    # Use black to autodetect our target versions
    target_versions = {
        v
        for v in black.detect_target_versions(
            black.lib2to3_parse(source_code.lstrip(), set(_version_map)))
        if v.value >= black.TargetVersion.PY36.value
    }
    assert target_versions
    input_code = source_code

    # Autoflake first:
    source_code = autoflake.fix_code(
        source_code,
        expand_star_imports=True,
        remove_all_unused_imports=True,
        remove_duplicate_keys=True,
        remove_unused_variables=True,
    )
    # Then isort...
    # TODO: swap as soon as 5.0 is released for black compat & clean config handling
    # source_code = isort.api.sorted_imports(
    #     file_contents=source_code, known_first_party=first_party_imports,
    # )
    source_code = isort.SortImports(file_contents=source_code).output
    # Now pyupgrade - see pyupgrade._fix_file
    source_code = pyupgrade._fix_tokens(
        source_code,
        min_version=_version_map[min(target_versions, key=attrgetter("value"))],
    )
    source_code = pyupgrade._fix_percent_format(source_code)
    source_code = pyupgrade._fix_py3_plus(source_code)
    # and finally Black!
    source_code = black.format_str(
        source_code, mode=black.FileMode(target_versions=target_versions))

    if source_code == input_code:
        return source_code
    # If we've modified the code, iterate to a fixpoint.
    # e.g. "pass;#" -> "pass\n#\n" -> "#\n"
    return shed(source_code=source_code, first_party_imports=first_party_imports)
def test_fix_code_with_unused_variables_should_skip_nonlocal(self):
    """pyflakes does not handle nonlocal correctly."""
    code = """\
def bar():
    x = 1

    def foo():
        nonlocal x
        x = 2
"""
    self.assertEqual(
        code,
        autoflake.fix_code(code, remove_unused_variables=True))
def test_fix_code_with_comma_on_right(self):
    """pyflakes does not handle nonlocal correctly."""
    self.assertEqual(
        """\
def main():
    pass
""",
        autoflake.fix_code("""\
def main():
    x = (1, 2, 3)
""", remove_unused_variables=True))
def test_fix_code_should_ignore_complex_case_of_duplicate_key(self):
    """We only handle simple cases."""
    code = """\
a = {(0,1): 1, (0, 1): 'two',
  (0,1): 3,
}
print(a)
"""
    self.assertEqual(
        code,
        ''.join(autoflake.fix_code(code, remove_duplicate_keys=True)))
def test_fix_code_with_indented_from(self):
    self.assertEqual(
        """\
def z():
    from ctypes import POINTER, byref
    POINTER, byref
""",
        autoflake.fix_code("""\
def z():
    from ctypes import c_short, c_uint, c_int, c_long, pointer, POINTER, byref
    POINTER, byref
"""))

    self.assertEqual(
        """\
def z():
    pass
""",
        autoflake.fix_code("""\
def z():
    from ctypes import c_short, c_uint, c_int, c_long, pointer, POINTER, byref
"""))
def test_fix_code_with_unused_variables(self):
    self.assertEqual(
        """\
def main():
    y = 11
    print(y)
""",
        autoflake.fix_code("""\
def main():
    x = 10
    y = 11
    print(y)
""", remove_unused_variables=True))
def run(self, filename, file):
    """
    Detects unused code. This functionality is limited to:

    - Unneeded pass statements.
    - Unneeded builtin imports.

    (Others might have side effects.)
    """
    corrected = autoflake.fix_code(''.join(file)).splitlines(True)

    for diff in Diff.from_string_arrays(file, corrected).split_diff():
        yield Result(self,
                     "This file contains unused source code.",
                     affected_code=(diff.range(filename),),
                     diffs={filename: diff})
class PyUnusedCodeBear(CorrectionBasedBear):
    GET_REPLACEMENT = staticmethod(
        lambda file: (autoflake.fix_code(''.join(file)).splitlines(True), []))
    RESULT_MESSAGE = "This file contains unused source code."

    def run(self, filename, file):
        """
        Detects unused code. This functionality is limited to:

        - Unneeded pass statements.
        - Unneeded builtin imports.

        (Others might have side effects.)
        """
        for result in self.retrieve_results(filename, file):
            yield result
def test_fix_code_with_from_and_as(self):
    self.assertEqual(
        """\
from collections import namedtuple as xyz
xyz
""",
        autoflake.fix_code("""\
from collections import defaultdict, namedtuple as xyz
xyz
"""))

    self.assertEqual(
        """\
from collections import namedtuple as xyz
xyz
""",
        autoflake.fix_code("""\
from collections import defaultdict as abc, namedtuple as xyz
xyz
"""))

    self.assertEqual(
        """\
from collections import namedtuple
namedtuple
""",
        autoflake.fix_code("""\
from collections import defaultdict as abc, namedtuple
namedtuple
"""))

    self.assertEqual(
        """\
""",
        autoflake.fix_code("""\
from collections import defaultdict as abc, namedtuple as xyz
"""))
def clean_python_code(python_code, autoflake=True, tools_json=False):
    global tools_with_pipe

    # temporarily comment out ipython %magic to avoid black / yapf errors
    python_code = re.sub("^%", "##%##", python_code, flags=re.M)
    python_code = re.sub("^!", "##!##", python_code, flags=re.M)

    # run source code string through autoflake
    if autoflake:
        # programmatic autoflake
        python_code = fix_code(
            python_code,
            expand_star_imports=True,
            remove_all_unused_imports=True,
            remove_duplicate_keys=True,
            remove_unused_variables=True,
        )

    # process tools_json if provided
    if isinstance(tools_json, str):
        test_file = Path(tools_json)
        if test_file.is_file():
            # json file's path is provided
            with open(tools_json, "r") as f:
                user_tools_with_pipe = load(f)
        else:
            # json directly provided as string
            user_tools_with_pipe = loads(tools_json)
        check_user_input_format(user_tools_with_pipe)
        # Update tools_with_pipe from the user's configuration, with the
        # user's preferences taking precedence
        tools_with_pipe = {**tools_with_pipe, **user_tools_with_pipe}

    # run source code through elements in tools_with_pipe.keys()
    for tool in tools_with_pipe.values():
        if tool["active"]:
            pipe = Popen(
                ([tool["command"]] + tool["args"]),
                stdin=PIPE,
                stdout=PIPE,
                stderr=PIPE,
                universal_newlines=True,
            )
            python_code, stderrdata = pipe.communicate(python_code)
            if stderrdata != "":
                raise Exception(stderrdata)

    # restore ipython %magic
    python_code = re.sub("^##%##", "%", python_code, flags=re.M)
    python_code = re.sub("^##!##", "!", python_code, flags=re.M)

    return python_code
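# Hedged usage sketch (not part of the original source): run a notebook cell's
# source through clean_python_code above with only the autoflake pass enabled.
# Judging from how the function consumes it, tools_json appears to expect a
# mapping like {"<tool name>": {"command": ..., "args": [...], "active": true}},
# but the exact schema enforced by check_user_input_format is an assumption here.
sample_cell = "%matplotlib inline\nimport os\nimport sys\nprint(sys.version)\n"
cleaned = clean_python_code(sample_cell, autoflake=True, tools_json=False)
# fix_code should drop the unused `os` import; the %magic line is commented out
# before formatting and restored afterwards.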
def test_fix_code_should_ignore_complex_case_of_duplicate_key_comma(self):
    """We only handle simple cases."""
    code = """\
{
    1: {0,
    },
    1: {2,
    },
}
"""
    self.assertEqual(
        code,
        ''.join(autoflake.fix_code(code, remove_duplicate_keys=True)))
def test_fix_code_should_ignore_duplicate_key_with_no_comma(self):
    """We don't want to delete the line and leave a lone comma."""
    code = """\
a = {
  (0,1) : 1
  ,
  (0, 1): 'two',
  (0,1): 3,
}
print(a)
"""
    self.assertEqual(
        code,
        ''.join(autoflake.fix_code(code, remove_duplicate_keys=True)))
def test_fix_code_with_duplicate_key(self):
    self.assertEqual(
        """\
a = {
  (0,1): 3,
}
print(a)
""",
        ''.join(autoflake.fix_code("""\
a = {
  (0,1): 1,
  (0, 1): 'two',
  (0,1): 3,
}
print(a)
""", remove_duplicate_keys=True)))
def create_spiders(spiders, schemas, extractors, items):
    """Create all spiders from slybot spiders."""
    item_classes = ''
    if items:
        item_classes = '\nfrom ..items import {}'.format(', '.join(
            (v().__class__.__name__ for v in items.values())))
    spider_data = []
    for name, (spider, spec) in spiders.items():
        log.info('Creating spider "%s"' % spider.name)
        spider = create_spider(name, spider, spec, schemas, extractors, items)
        cleaned_name = _clean(name)
        filename = 'spiders/{}.py'.format(cleaned_name)
        data = '\n'.join(
            (SPIDER_FILE(item_classes=item_classes), spider.strip()))
        code = fix_lines(fix_code(data.decode('utf-8')).splitlines(), Options)
        spider_data.append((filename, code))
    return spider_data
def create_spiders(spiders, schemas, extractors, items):
    """Create all spiders from slybot spiders."""
    item_classes = ''
    if items:
        item_classes = '\nfrom ..items import {}'.format(
            ', '.join((v().__class__.__name__ for v in items.values()))
        )
    spider_data = []
    for name, (spider, spec) in spiders.items():
        log.info('Creating spider "%s"' % spider.name)
        spider = create_spider(name, spider, spec, schemas, extractors, items)
        cleaned_name = _clean(name)
        filename = 'spiders/{}.py'.format(cleaned_name)
        data = '\n'.join((SPIDER_FILE(item_classes=item_classes), spider.strip()))
        code = fix_lines(fix_code(data.decode('utf-8')).splitlines(), Options)
        spider_data.append((filename, code))
    return spider_data
def test_fix_code(self):
    self.assertEqual(
        """\
import os
import math
from sys import version
os.foo()
math.pi
x = version
""",
        autoflake.fix_code("""\
import os
import re
import abc, math, subprocess
from sys import exit, version
os.foo()
math.pi
x = version
"""))
def postprocess_hacks(text, mod):
    import autoflake
    import yapf

    # Hack to remove lines caused by Py2 compat
    text = text.replace('Generator = object\n', '')
    text = text.replace('select = NotImplemented\n', '')
    text = text.replace('iteritems: Any\n', '')
    text = text.replace('text_type = str\n', '')
    text = text.replace('text_type: Any\n', '')
    text = text.replace('string_types: Any\n', '')
    text = text.replace('PY2: Any\n', '')
    text = text.replace('__win32_can_symlink__: Any\n', '')
    # text = text.replace('odict = OrderedDict', '')
    # text = text.replace('ddict = defaultdict', '')

    if mod.path.endswith('util_path.py'):
        # hack for forward reference
        text = text.replace(' -> Path:', " -> 'Path':")
        text = text.replace('class Path(_PathBase)', "class Path")

    # Format the PYI file nicely
    text = autoflake.fix_code(text, remove_unused_variables=True,
                              remove_all_unused_imports=True)

    # import autopep8
    # text = autopep8.fix_code(text, options={
    #     'aggressive': 0,
    #     'experimental': 0,
    # })

    style = yapf.yapf_api.style.CreatePEP8Style()
    text, _ = yapf.yapf_api.FormatCode(
        text, filename='<stdin>', style_config=style, lines=None, verify=False)
    # print(text)
    return text
def formatters(aggressive, apply_config, filename='',
               remove_all_unused_imports=False,
               remove_unused_variables=False):
    """Return list of code formatters."""
    if aggressive:
        yield lambda code: autoflake.fix_code(
            code,
            remove_all_unused_imports=remove_all_unused_imports,
            remove_unused_variables=remove_unused_variables)

        autopep8_options = autopep8.parse_args(
            [filename] + int(aggressive) * ['--aggressive'],
            apply_config=apply_config)
    else:
        autopep8_options = autopep8.parse_args([filename],
                                               apply_config=apply_config)

    yield lambda code: autopep8.fix_code(code, options=autopep8_options)
    yield docformatter.format_code
    yield unify.format_code
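# Minimal sketch of how the formatters() generator above is meant to be consumed
# (assumed usage, not from the original source): each yielded callable takes a
# code string and returns the reformatted string, so they can simply be chained.
def apply_formatters(source, aggressive=1, apply_config=False):
    """Run source through every formatter yielded by formatters()."""
    for formatter in formatters(aggressive, apply_config,
                                remove_all_unused_imports=True,
                                remove_unused_variables=False):
        source = formatter(source)
    return source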
def test_fix_code_with_duplicate_key_with_many_braces(self):
    self.assertEqual(
        """\
a = None
{None: {None: None},
 }
{
 None: a.b,
}
""",
        ''.join(autoflake.fix_code("""\
a = None
{None: {None: None},
 }
{
 None: a.a,
 None: a.b,
}
""", remove_duplicate_keys=True)))
def test_fix_code_with_duplicate_key_longer(self):
    self.assertEqual(
        """\
{
    'a': 0,
    'c': 2,
    'd': 3,
    'e': 4,
    'f': 5,
    'b': 6,
}
""",
        ''.join(autoflake.fix_code("""\
{
    'a': 0,
    'b': 1,
    'c': 2,
    'd': 3,
    'e': 4,
    'f': 5,
    'b': 6,
}
""", remove_duplicate_keys=True)))
def shed(
    source_code: str,
    *,
    refactor: bool = False,
    first_party_imports: FrozenSet[str] = frozenset(),
    min_version: Tuple[int, int] = _default_min_version,
    _location: str = "string passed to shed.shed()",
    _remove_unused_imports: bool = True,
) -> str:
    """Process the source code of a single module."""
    assert isinstance(source_code, str)
    assert isinstance(refactor, bool)
    assert isinstance(first_party_imports, frozenset)
    assert all(isinstance(name, str) for name in first_party_imports)
    assert all(name.isidentifier() for name in first_party_imports)
    assert min_version in _version_map.values()

    if source_code == "":
        return ""

    # Use black to autodetect our target versions
    try:
        parsed = lib2to3_parse(
            source_code.lstrip(),
            target_versions={
                k for k, v in _version_map.items() if v >= min_version
            },
        )
    # black.InvalidInput, blib2to3.pgen2.tokenize.TokenError, SyntaxError...
    # for forwards-compatibility I'm just going general here.
    except Exception as err:
        msg = f"Could not parse {_location}"
        for pattern, blocktype in _SUGGESTIONS:
            if re.search(pattern, source_code, flags=re.MULTILINE):
                msg += f"\n Perhaps you should use a {blocktype!r} block instead?"
        try:
            compile(source_code, "<string>", "exec")
        except SyntaxError:
            pass
        else:
            msg += "\n The syntax is valid Python, so please report this as a bug."
        w = ShedSyntaxWarning(msg)
        w.__cause__ = err
        warnings.warn(w, stacklevel=_location.count(" block in ") + 2)
        # Even if the code itself has invalid syntax, we might be able to
        # regex-match and therefore reformat code embedded in docstrings.
        return docshed(
            source_code,
            refactor=refactor,
            first_party_imports=first_party_imports,
            min_version=min_version,
            _location=_location,
        )
    target_versions = set(_version_map) & set(
        black.detect_target_versions(parsed))
    assert target_versions
    min_version = max(
        min_version,
        _version_map[min(target_versions, key=attrgetter("value"))],
    )

    if refactor:
        # Here we have a deferred imports section, which is pretty ugly.
        # It does however have one crucial advantage: it saves several hundred
        # milliseconds of startup latency in the common case where --refactor
        # was *not* passed. This is a big deal for interactive use-cases such as
        # pre-commit hooks or format-on-save in editors (though I prefer Black
        # for the latter).
        global com2ann
        global _run_codemods
        if com2ann is None:
            from ._codemods import _run_codemods  # type: ignore

            try:
                from com2ann import com2ann
            except ImportError:  # pragma: no cover  # on Python 3.8
                assert sys.version_info < (3, 8)
                com2ann = _fallback
        # OK, everything's imported, back to the runtime logic!

        # Some tools assume that the file is multi-line, but empty files are
        # valid input.
        source_code += "\n"

        # Use com2ann to convert type comments to annotations on Python 3.8+
        annotated = com2ann(
            source_code,
            drop_ellipsis=True,
            silent=True,
            python_minor_version=min(min_version[1], sys.version_info[1]),
        )
        if annotated:  # pragma: no branch
            # This can only be None if ast.parse() raises a SyntaxError,
            # which is possible but rare after the parsing checks above.
            source_code, _ = annotated

    # One tricky thing: running `isort` or `autoflake` can "unlock" further fixes
    # for `black`, e.g. "pass;#" -> "pass\n#\n" -> "#\n". We therefore run it
    # before other fixers, and then (if they made changes) again afterwards.
    black_mode = black.Mode(target_versions=target_versions)  # type: ignore
    source_code = blackened = black.format_str(source_code, mode=black_mode)
    pyupgrade_min = min(min_version, max(pyupgrade._main.IMPORT_REMOVALS))
    pu_settings = pyupgrade._main.Settings(min_version=pyupgrade_min)
    source_code = pyupgrade._main._fix_plugins(source_code, settings=pu_settings)
    if source_code != blackened:
        # Second step to converge: https://github.com/asottile/pyupgrade/issues/273
        source_code = pyupgrade._main._fix_plugins(source_code, settings=pu_settings)
    source_code = pyupgrade._main._fix_tokens(source_code, min_version=pyupgrade_min)
    if refactor:
        source_code = _run_codemods(source_code, min_version=min_version)
    try:
        source_code = isort.code(
            source_code,
            known_first_party=first_party_imports,
            known_local_folder={"tests"},
            profile="black",
            combine_as_imports=True,
        )
    except FileSkipComment:
        pass
    source_code = autoflake.fix_code(
        source_code,
        expand_star_imports=True,
        remove_all_unused_imports=_remove_unused_imports,
    )
    if source_code != blackened:
        source_code = black.format_str(source_code, mode=black_mode)

    # Then shed.docshed (below) formats any code blocks in documentation
    source_code = docshed(
        source_code,
        refactor=refactor,
        first_party_imports=first_party_imports,
        min_version=min_version,
        _location=_location,
    )
    # Remove any extra trailing whitespace
    return source_code.rstrip() + "\n"
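# Hedged usage sketch for shed() above (the input string and the noted behaviour
# are illustrative assumptions, not taken from the original source): format a
# module held in memory, without touching the filesystem.
if __name__ == "__main__":
    messy = "import os, sys\nprint( sys.argv )\n"
    print(shed(messy, refactor=False), end="")
    # autoflake should drop the unused `os` import and black should normalise
    # the call spacing.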
def test_fix_code_should_handle_pyflakes_recursion_error_gracefully(self):
    code = 'x = [{}]'.format('+'.join(['abc' for _ in range(2000)]))
    self.assertEqual(
        code,
        autoflake.fix_code(code))
def test_fix_code_with_empty_string(self):
    self.assertEqual('', autoflake.fix_code(''))
def lint(self, filename, file):
    output = autoflake.fix_code(''.join(file)).splitlines(True)
    return self.process_output(output, filename, file)
def test_fix_code_with_empty_string(self):
    self.assertEqual(
        '',
        autoflake.fix_code(''))