def run(self, found_files):
    """Scan every plain-text file for dodgy warnings and convert them to messages."""
    raw_hits = []
    for candidate in found_files.iter_file_paths():
        guessed_type, guessed_encoding = mimetypes.guess_type(candidate)
        # Only uncompressed text files are worth scanning.
        if guessed_type is None:
            continue
        if not guessed_type.startswith("text/"):
            continue
        if guessed_encoding is not None:
            continue
        try:
            source_text = read_py_file(candidate)
        except CouldNotHandleEncoding:
            # Unreadable files are skipped silently.
            continue
        raw_hits.extend(
            (candidate, line, code, message)
            for line, code, message in check_file_contents(source_text)
        )

    messages = []
    for candidate, line, code, message in raw_hits:
        common = os.path.commonprefix([found_files.rootpath, candidate])
        location = Location(
            candidate,
            module_from_path(candidate[len(common):]),
            "",
            line,
            0,
            absolute_path=True,
        )
        messages.append(Message("dodgy", code, location, message))
    return messages
def run(self, found_files):
    """Measure cyclomatic complexity of every module with mccabe.

    Emits an MC0001 message for each function/method whose complexity
    exceeds ``self.max_complexity``, and an MC0000 tool-error message for
    any file that cannot be read or parsed.
    """
    messages = []
    for code_file in found_files.iter_module_paths():
        try:
            contents = read_py_file(code_file)
            tree = ast.parse(
                contents,
                filename=code_file,
            )
        except CouldNotHandleEncoding as err:
            # File bytes could not be decoded; report and move on.
            messages.append(
                make_tool_error_message(
                    code_file, 'mccabe', 'MC0000',
                    message='Could not handle the encoding of this file: %s'
                    % err.encoding))
            continue
        except SyntaxError as err:
            # Not valid Python; report the exact position of the error.
            messages.append(
                make_tool_error_message(
                    code_file, 'mccabe', 'MC0000',
                    line=err.lineno, character=err.offset,
                    message='Syntax Error'))
            continue
        except TypeError:
            # ast.parse can raise TypeError on pathological input
            # (e.g. null bytes in the source).
            messages.append(
                make_tool_error_message(
                    code_file, 'mccabe', 'MC0000',
                    message='Unable to parse file'))
            continue
        # Walk the AST building one control-flow graph per callable.
        visitor = PathGraphingAstVisitor()
        visitor.preorder(tree, visitor)
        for graph in visitor.graphs.values():
            complexity = graph.complexity()
            if complexity > self.max_complexity:
                location = Location(path=code_file, module=None,
                                    function=graph.entity,
                                    line=graph.lineno,
                                    character=0, absolute_path=True)
                message = Message(
                    source='mccabe',
                    code='MC0001',
                    location=location,
                    message='%s is too complex (%s)' % (
                        graph.entity,
                        complexity,
                    ),
                )
                messages.append(message)
    # Apply the tool's configured include/exclude filtering.
    return self.filter_messages(messages)
def run(self, found_files):
    """Collect dodgy warnings from plain-text files and turn them into Messages."""

    def _is_plain_text(mime_info):
        # Accept only uncompressed text/* files.
        main_type, encoding_hint = mime_info
        return (main_type is not None
                and main_type.startswith('text/')
                and encoding_hint is None)

    messages = []
    for filepath in found_files.iter_file_paths():
        if not _is_plain_text(mimetypes.guess_type(filepath)):
            continue
        try:
            contents = read_py_file(filepath)
        except CouldNotHandleEncoding:
            # Skip files whose encoding cannot be determined.
            continue
        prefix_len = len(os.path.commonprefix([found_files.rootpath, filepath]))
        for line, code, message in check_file_contents(contents):
            loc = Location(filepath, module_from_path(filepath[prefix_len:]), '',
                           line, 0, absolute_path=True)
            messages.append(Message('dodgy', code, loc, message))
    return messages
def find_from_path(path):
    """Recursively discover which known libraries are imported beneath *path*.

    Raises PermissionMissing if a directory cannot be listed.
    """
    detected = set()
    try:
        entries = os.listdir(path)
    except PermissionError as err:
        # Surface unreadable directories as the project-specific error.
        raise PermissionMissing(path) from err

    for entry in entries:
        full_path = os.path.abspath(os.path.join(path, entry))
        if os.path.isdir(full_path):
            # Recurse into real directories, but never into virtualenvs.
            if not is_virtualenv(full_path):
                detected |= find_from_path(full_path)
        elif not os.path.islink(full_path) and full_path.endswith(".py"):
            try:
                detected |= find_from_imports(encoding.read_py_file(full_path))
            except encoding.CouldNotHandleEncoding as err:
                # TODO: this output will break output formats such as JSON
                warnings.warn("{0}: {1}".format(err.path, err.cause), ImportWarning)
        if len(detected) == len(POSSIBLE_LIBRARIES):
            # Every possible library has been found - stop recursing.
            break
    return detected
def run(self, found_files):
    """Run the dodgy checker over plain-text files and report its warnings."""
    collected = []
    for filepath in found_files.iter_file_paths():
        mime_type, mime_encoding = mimetypes.guess_type(filepath)
        is_scannable = (mime_type is not None
                        and mime_type.startswith('text/')
                        and mime_encoding is None)
        if not is_scannable:
            continue
        try:
            contents = read_py_file(filepath)
        except CouldNotHandleEncoding:
            # Files we cannot decode are silently skipped.
            continue
        for line, code, message in check_file_contents(contents):
            collected.append((filepath, line, code, message))

    def _to_message(entry):
        # Convert one raw dodgy warning into a prospector Message.
        filepath, line, code, message = entry
        shared = os.path.commonprefix([found_files.rootpath, filepath])
        loc = Location(filepath, module_from_path(filepath[len(shared):]), '',
                       line, 0, absolute_path=True)
        return Message('dodgy', code, loc, message)

    return [_to_message(entry) for entry in collected]
def run(self, found_files):
    """Check docstring conventions on every module and collect the results."""
    results = []
    convention_checker = ConventionChecker()
    for module_path in found_files.iter_module_paths():
        try:
            source = read_py_file(module_path)
            for problem in convention_checker.check_source(source, module_path, None):
                where = Location(
                    path=module_path,
                    module=None,
                    function="",
                    line=problem.line,
                    character=0,
                    absolute_path=True,
                )
                # pydocstyle prefixes each message with its code; keep only the text.
                results.append(Message(
                    source="pydocstyle",
                    code=problem.code,
                    location=where,
                    message=problem.message.partition(":")[2].strip(),
                ))
        except CouldNotHandleEncoding as err:
            results.append(make_tool_error_message(
                module_path,
                "pydocstyle",
                "D000",
                message=
                f"Could not handle the encoding of this file: {err.encoding}",
            ))
        except AllError as exc:
            # pydocstyle's Parser.parse_all raises AllError when the
            # module's __all__ is too complex to be analyzed.
            results.append(make_tool_error_message(
                module_path,
                "pydocstyle",
                "D000",
                line=1,
                character=0,
                message=exc.args[0],
            ))
    return self.filter_messages(results)
def scavenge(self, _=None):
    """Run vulture's scan over every found module.

    The positional argument (a list of paths) is accepted only to match
    the signature of Vulture.scavenge, which this method overrides; the
    modules actually scanned come from the found-files object instead.
    """
    for module_path in self._files.iter_module_paths():
        try:
            source_text = read_py_file(module_path)
        except CouldNotHandleEncoding as err:
            error = make_tool_error_message(
                module_path, 'vulture', 'V000',
                message='Could not handle the encoding of this file: %s' % err.encoding
            )
            self._internal_messages.append(error)
            continue
        self.file = module_path
        self.scan(source_text)
def run(self, found_files):
    """Run the pep257 checker across all modules, reporting each violation."""
    found = []
    pep257_checker = PEP257Checker()

    def _record_error(error, path):
        # Strip the leading "Dxxx:" prefix pep257 puts on its messages.
        cleaned = error.message.partition(':')[2].strip()
        loc = Location(path=path, module=None, function='',
                       line=error.line, character=0, absolute_path=True)
        found.append(Message(source='pep257', code=error.code,
                             location=loc, message=cleaned))

    for code_file in found_files.iter_module_paths():
        try:
            source = read_py_file(code_file)
            for error in pep257_checker.check_source(source, code_file, None):
                _record_error(error, code_file)
        except CouldNotHandleEncoding as err:
            found.append(make_tool_error_message(
                code_file, 'pep257', 'D000',
                message='Could not handle the encoding of this file: %s' % err.encoding
            ))
        except AllError as exc:
            # pep257's Parser.parse_all raises AllError when an attempt to
            # analyze the __all__ definition has failed, i.e. __all__ is
            # too complex to be parsed.
            found.append(make_tool_error_message(
                code_file, 'pep257', 'D000', line=1, character=0,
                message=exc.args[0]
            ))
    return self.filter_messages(found)
def get_suppressions(relative_filepaths, root, messages):
    """
    Given every message which was emitted by the tools, and the list of
    files to inspect, create a list of files to ignore, and a map of
    filepath -> line-number -> codes to ignore.

    Returns a 3-tuple: (paths_to_ignore, lines_to_ignore, messages_to_ignore).
    """
    paths_to_ignore = set()
    lines_to_ignore = defaultdict(set)
    messages_to_ignore = defaultdict(lambda: defaultdict(set))

    # first deal with 'noqa' style messages
    # NOTE: removed leftover debugger code ("import ipdb; ipdb.set_trace()")
    # that halted execution at an interactive breakpoint on every run.
    for filepath in relative_filepaths:
        abspath = os.path.join(root, filepath)
        try:
            file_contents = encoding.read_py_file(abspath).split('\n')
        except encoding.CouldNotHandleEncoding as err:
            # TODO: this output will break output formats such as JSON
            warnings.warn('{0}: {1}'.format(err.path, err.cause), ImportWarning)
            continue
        ignore_file, ignore_lines = get_noqa_suppressions(file_contents)
        if ignore_file:
            paths_to_ignore.add(filepath)
        lines_to_ignore[filepath] |= ignore_lines

    # now figure out which messages were suppressed by pylint
    pylint_ignore_files, pylint_ignore_messages = _parse_pylint_informational(
        messages)
    paths_to_ignore |= pylint_ignore_files
    for filepath, line in pylint_ignore_messages.items():
        for line_number, codes in line.items():
            for code in codes:
                messages_to_ignore[filepath][line_number].add(('pylint', code))
                # Other tools emit equivalent codes for the same problem;
                # suppressing the pylint code suppresses those too.
                if code in _PYLINT_EQUIVALENTS:
                    for equivalent in _PYLINT_EQUIVALENTS[code]:
                        messages_to_ignore[filepath][line_number].add(
                            equivalent)

    return paths_to_ignore, lines_to_ignore, messages_to_ignore
def scavenge(self, _=None):
    """Scan every found module with vulture, recording encoding failures."""
    # The argument mirrors Vulture.scavenge's signature (a list of paths)
    # but is ignored here: the found_files object decides what is scanned.
    # It exists to explicitly acknowledge the override.
    for module in self._files.iter_module_paths():
        try:
            contents = read_py_file(module)
        except CouldNotHandleEncoding as err:
            self._internal_messages.append(make_tool_error_message(
                module,
                'vulture',
                'V000',
                message='Could not handle the encoding of this file: %s' % err.encoding))
        else:
            self.file = module
            self.scan(contents)
def get_suppressions(relative_filepaths, root, messages):
    """
    Given every message which was emitted by the tools, and the list of
    files to inspect, create a list of files to ignore, and a map of
    filepath -> line-number -> codes to ignore
    """
    paths_to_ignore = set()
    lines_to_ignore = defaultdict(set)
    messages_to_ignore = defaultdict(lambda: defaultdict(set))

    # Handle 'noqa'-style inline suppressions first.
    for rel_path in relative_filepaths:
        absolute = os.path.join(root, rel_path)
        try:
            contents = encoding.read_py_file(absolute).split('\n')
        except encoding.CouldNotHandleEncoding as err:
            # TODO: this output will break output formats such as JSON
            warnings.warn('{0}: {1}'.format(err.path, err.cause), ImportWarning)
            continue
        whole_file, suppressed_lines = get_noqa_suppressions(contents)
        if whole_file:
            paths_to_ignore.add(rel_path)
        lines_to_ignore[rel_path] |= suppressed_lines

    # Then fold in suppressions that pylint reported informationally.
    pylint_ignore_files, pylint_ignore_messages = _parse_pylint_informational(messages)
    paths_to_ignore |= pylint_ignore_files
    for rel_path, by_line in pylint_ignore_messages.items():
        for line_number, codes in by_line.items():
            for code in codes:
                ignored_here = messages_to_ignore[rel_path][line_number]
                ignored_here.add(('pylint', code))
                # A suppressed pylint code also suppresses the equivalent
                # codes from other tools.
                for equivalent in _PYLINT_EQUIVALENTS.get(code, ()):
                    ignored_here.add(equivalent)
    return paths_to_ignore, lines_to_ignore, messages_to_ignore
def find_from_path(path):
    """Walk *path* recursively, returning the set of known libraries imported."""
    found = set()
    target_count = len(POSSIBLE_LIBRARIES)
    for entry in os.listdir(path):
        entry_path = os.path.abspath(os.path.join(path, entry))
        if os.path.isdir(entry_path):
            if is_virtualenv(entry_path):
                # Never descend into virtualenvs.
                continue
            found |= find_from_path(entry_path)
        elif not os.path.islink(entry_path) and entry_path.endswith('.py'):
            try:
                found |= find_from_imports(encoding.read_py_file(entry_path))
            except encoding.CouldNotHandleEncoding as err:
                # TODO: this output will break output formats such as JSON
                warnings.warn('{0}: {1}'.format(err.path, err.cause), ImportWarning)
        if len(found) == target_count:
            # Every detectable library has been found; stop recursing.
            break
    return found