def test_result_range_inline_overlap(self):
    # Three ranges overlap inside a single line; stripping them must
    # leave only the uncovered tail "789\n".
    test_file = ["123456789\n"]
    test_file_dict = {abspath("test_file"): test_file}

    overlapping_ranges = (SourceRange.from_values("test_file", 1, 1, 1, 4),
                          SourceRange.from_values("test_file", 1, 2, 1, 3),
                          SourceRange.from_values("test_file", 1, 3, 1, 6))
    test_result = Result("origin", "message", overlapping_ranges)

    result_diff = remove_result_ranges_diffs(
        [test_result], test_file_dict)[test_result][abspath("test_file")]
    expected_diff = Diff.from_string_arrays(test_file, ["789\n"])
    self.assertEqual(result_diff, expected_diff)
def run(self, filename, file):
    """
    Check for the correct spelling of ``coala`` in the file.
    """
    # The pattern is loop-invariant, so bind it once instead of
    # rebuilding it for every line; a comprehension replaces the
    # manual append loop.  Only the leading upper-case 'C' is
    # rewritten, the captured rest of the word is kept as-is.
    wrong_spelling = r'C([oO][aA][lL][aA])'
    corrected = [re.sub(wrong_spelling,
                        lambda match: 'c' + match.group(1),
                        line)
                 for line in file]

    diffs = Diff.from_string_arrays(file, corrected).split_diff()
    for diff in diffs:
        yield Result(self,
                     "``coala`` is always written with a lower case ``c``",
                     affected_code=(diff.range(filename), ),
                     diffs={filename: diff},
                     severity=RESULT_SEVERITY.MAJOR)
def test_result_range_line_wise_overlap(self):
    # Two overlapping multi-line ranges spanning lines 2-5; only the
    # unaffected pieces of those lines survive.
    test_file = ["11", "22", "33", "44", "55", "66"]
    test_file_dict = {abspath("test_file"): test_file}

    overlapping_ranges = (SourceRange.from_values("test_file", 2, 2, 5, 1),
                          SourceRange.from_values("test_file", 3, 1, 4, 1))
    test_result = Result("origin", "message", overlapping_ranges)

    result_diff = remove_result_ranges_diffs(
        [test_result], test_file_dict)[test_result][abspath("test_file")]
    expected_diff = Diff.from_string_arrays(test_file,
                                            ["11", "2", "5", "66"])
    self.assertEqual(result_diff, expected_diff)
def test_ask_for_actions_and_apply5(self):
    # Chaining 'c' then picking 'i' must not record ChainPatchAction
    # as a failed action.
    failed_actions = set()
    patch_action = ApplyPatchAction()
    chain_action = ChainPatchAction()
    args = [self.console_printer,
            Section(''),
            [patch_action.get_metadata(), chain_action.get_metadata()],
            {'ApplyPatchAction': patch_action,
             'ChainPatchAction': chain_action},
            failed_actions,
            Result('origin', 'message'),
            {}, {}, {}]

    with simulate_console_inputs('c', 'i') as generator:
        ask_for_action_and_apply(*args)
        self.assertEqual(generator.last_input, 1)
        self.assertNotIn('ChainPatchAction', failed_actions)
def _process_corrected(self, output, filename, file):
    """
    Process the output and use it to create Results by creating diffs.

    The diffs are created by comparing the output and the original file.

    :param output:   The corrected file contents.
    :param filename: The name of the file.
    :param file:     The original contents of the file.
    :return:         Generator which gives Results produced based on the
                     diffs created by comparing the original and corrected
                     contents.
    """
    for diff in self.__yield_diffs(file, output):
        yield Result(self,
                     self.diff_message,
                     affected_code=(diff.range(filename), ),
                     diffs={filename: diff},
                     severity=self.diff_severity)
def test_default_input_apply_single_test(self):
    # With apply_single set, answering 'a' must not report success.
    chosen_action = TestAction()
    noop_action = DoNothingAction()
    apply_single = 'Test (A)ction'
    cli_section = Section('cli')
    args = [self.console_printer,
            cli_section,
            [noop_action.get_metadata(), chosen_action.get_metadata()],
            {'DoNothingAction': noop_action,
             'TestAction': chosen_action},
            set(),
            Result('origin', 'message'),
            {}, {}, {},
            apply_single]

    with simulate_console_inputs('a') as generator:
        self.assertFalse(ask_for_action_and_apply(*args))
def run(self, filename, file,
        max_line_length: int = 79,
        indent_size: int = SpacingHelper.DEFAULT_TAB_WIDTH,
        pep_ignore: typed_list(str) = (),
        pep_select: typed_list(str) = (),
        local_pep8_config: bool = False,
        ):
    """
    Detects and fixes PEP8 incompliant code. This bear will not change
    functionality of the code in any way.

    :param max_line_length:   Maximum number of characters for a line.
                              When set to 0 allows infinite line length.
    :param indent_size:       Number of spaces per indentation level.
    :param pep_ignore:        A list of errors/warnings to ignore.
    :param pep_select:        A list of errors/warnings to exclusively
                              apply.
    :param local_pep8_config: Set to true if autopep8 should use a config
                              file as if run normally from this directory.
    """
    # 0 means "unlimited"; autopep8 has no such flag, so fall back to the
    # largest representable size.
    if not max_line_length:
        max_line_length = sys.maxsize

    options = {'ignore': pep_ignore,
               'select': pep_select,
               'max_line_length': max_line_length,
               'indent_size': indent_size}

    corrected = autopep8.fix_code(''.join(file),
                                  apply_config=local_pep8_config,
                                  options=options).splitlines(True)

    for diff in Diff.from_string_arrays(file, corrected).split_diff():
        yield Result(self,
                     'The code does not comply to PEP8.',
                     affected_code=(diff.range(filename), ),
                     diffs={filename: diff})
def run(self, filename, file,
        cyclomatic_complexity: int = 8,
        ):
    """
    Check for all functions if they are too complicated using the
    cyclomatic complexity metric.

    You can read more about this metric at
    <https://www.wikiwand.com/en/Cyclomatic_complexity>.

    :param cyclomatic_complexity:
        Maximum cyclomatic complexity that is considered to be normal. The
        value of 10 had received substantial corroborating evidence. But
        the general recommendation: "For each module, either limit
        cyclomatic complexity to [the agreed-upon limit] or provide a
        written explanation of why the limit was exceeded."
    """
    root = Index.create().parse(filename).cursor
    for cursor, complexity in self.complexities(root, filename):
        # Guard clause instead of the nested if of the original.
        if complexity <= cyclomatic_complexity:
            continue
        affected_code = (sourcerange_from_clang_range(cursor.extent), )
        yield Result(
            self,
            "The function '{function}' should be simplified. Its "
            'cyclomatic complexity is {complexity} which exceeds '
            'maximal recommended value '
            'of {rec_value}.'.format(function=cursor.displayname,
                                     complexity=complexity,
                                     rec_value=cyclomatic_complexity),
            affected_code=affected_code,
            additional_info=(
                'The cyclomatic complexity is a metric that measures '
                'how complicated a function is by counting branches '
                'and exits of each function.\n\n'
                'Your function seems to be complicated and should be '
                'refactored so that it can be understood by other '
                'people easily.\n\nSee '
                '<http://www.wikiwand.com/en/Cyclomatic_complexity>'
                ' for more information.'))
def run(self, filename, file,
        language: str='auto',
        languagetool_disable_rules: typed_list(str)=()):
    '''
    Checks the code with LanguageTool.

    :param language:                   A locale representing the language
                                       you want to have checked. If set to
                                       'auto' the language is guessed. If
                                       the language cannot be guessed,
                                       'en-US' is used.
    :param languagetool_disable_rules: List of rules to disable checks for.
    '''
    joined_text = "".join(file)
    # Guess the language when requested, then fall back to en-US when the
    # guess fails (returns a falsy value).
    language = (guess_language(joined_text)
                if language == 'auto' else language)
    language = 'en-US' if not language else language

    tool = LanguageTool(language, motherTongue="en_US")
    tool.disabled.update(languagetool_disable_rules)

    for match in tool.check(joined_text):
        # A match without replacements cannot produce a patch.
        if match.replacements:
            replaced = correct(joined_text, [match]).splitlines(True)
            diffs = {filename: Diff.from_string_arrays(file, replaced)}
        else:
            diffs = None

        rule_id = match.ruleId
        if match.subId is not None:
            rule_id += '[{}]'.format(match.subId)

        message = match.msg + ' (' + rule_id + ')'
        source_range = SourceRange.from_values(filename,
                                               match.fromy+1,
                                               match.fromx+1,
                                               match.toy+1,
                                               match.tox+1)
        yield Result(self, message,
                     diffs=diffs,
                     affected_code=(source_range,))
def test_print_results_missing_file(self):
    self.log_printer = LogPrinter(NullPrinter())
    with retrieve_stdout() as stdout:
        print_results(self.log_printer,
                      Section(""),
                      [Result("t", "msg"),
                       Result.from_values("t", "msg", file="file", line=5)],
                      {},
                      {},
                      color=False)
        self.assertEqual("\n" + STR_PROJECT_WIDE + "\n"
                         "| | [NORMAL] t:\n"
                         "| | msg\n"
                         # Second results file isn't there, no context is
                         # printed, only a warning log message which we
                         # don't catch
                         "| | [NORMAL] t:\n"
                         "| | msg\n",
                         stdout.getvalue())
def test_output(self):
    """
    Validating that the yielded results are correct.
    """
    affected_code = (SourceRange.from_values(self.filename,
                                             start_line=111,
                                             start_column=1,
                                             end_line=143,
                                             end_column=2), )
    expected_result = Result(
        self.bear,
        "The function 'levels(int, int, int)' should be simplified. Its "
        "cyclomatic complexity is 10 which exceeds maximal recommended "
        "value of 8.",
        affected_code=affected_code)

    with execute_bear(self.bear, self.filename, self.file, 8) as out:
        self.assertEqual(len(out), 1)
        # Let's not test this, static and huge
        out[0].additional_info = ""
        self.assertEqual(out[0], expected_result)
# NOTE(review): This test asserts an exact console transcript via a
# triple-quoted template whose internal line breaks were lost when the
# source was collapsed onto one line; reformatting would silently change
# the expected runtime string, so the code is left byte-identical.
# It prints results covering three SourceRanges across two files and
# compares stdout against the highlighted, formatted transcript.
def test_print_results_multiple_ranges(self): affected_code = (SourceRange.from_values('some_file', 5, end_line=7), SourceRange.from_values('another_file', 1, 3, 1, 5), SourceRange.from_values('another_file', 3, 3, 3, 5)) with retrieve_stdout() as stdout: print_results( self.log_printer, Section(''), [ Result('ClangCloneDetectionBear', 'Clone Found', affected_code) ], { abspath('some_file'): ['line ' + str(i + 1) + '\n' for i in range(10)], abspath('another_file'): ['line ' + str(i + 1) for i in range(10)] }, {}, self.console_printer) self.assertEqual( """ another_file [ ]1 li{0}{1} another_file [ ]3 li{0}{2} some_file [ ]5 li{0}{3} [ ]6 li{0}{4} [ ]7 li{0}{5} **** ClangCloneDetectionBear [Section: ] **** ! ! [Severity: NORMAL] ! ! {6}\n""".format( highlight_text(self.no_color, 'ne', self.lexer, BackgroundSourceRangeStyle), highlight_text(self.no_color, ' 1', self.lexer), highlight_text(self.no_color, ' 3', self.lexer), highlight_text(self.no_color, ' 5', self.lexer), highlight_text(self.no_color, ' 6', self.lexer), highlight_text(self.no_color, ' 7', self.lexer), highlight_text(self.no_color, 'Clone Found', style=BackgroundMessageStyle), ' '), stdout.getvalue())
def test_print_results_missing_file(self):
    self.log_printer.log_level = logging.CRITICAL
    with retrieve_stdout() as stdout:
        print_results(self.log_printer,
                      Section(''),
                      [Result('t', 'msg'),
                       Result.from_values('t', 'msg', file='file', line=5)],
                      {},
                      {},
                      self.console_printer)
        self.assertEqual('\n' + STR_PROJECT_WIDE + '\n'
                         '| | [NORMAL] t:\n'
                         '| | {0}\n'
                         # Second results file isn't there, no context is
                         # printed, only a warning log message which we
                         # don't catch
                         '| | [NORMAL] t:\n'
                         '| | {0}\n'.format(
                             highlight_text(self.no_color,
                                            'msg',
                                            style=BackgroundMessageStyle)),
                         stdout.getvalue())
def test_json_diff(self):
    file_dict = {'f_a': ['1', '2', '3'],
                 'f_b': ['1', '2', '3']}
    diff = Diff(file_dict['f_a'])
    diff.delete_line(2)
    diff.modify_line(3, '3_changed')

    # Serialising a Result must serialise the embedded Diff as a
    # unified-diff string.
    uut = Result('origin', 'msg', diffs={'f_a': diff}).__json__(True)
    self.assertEqual(uut['diffs']['f_a'].__json__(),
                     '--- \n'
                     '+++ \n'
                     '@@ -1,3 +1,2 @@\n'
                     ' 1\n'
                     '-2\n'
                     '-3\n'
                     '+3_changed')

    JSONEncoder = create_json_encoder(use_relpath=True)
    json_dump = json.dumps(diff, cls=JSONEncoder, sort_keys=True)
    self.assertEqual(
        json_dump,
        '"--- \\n+++ \\n@@ -1,3 +1,2 @@\\n 1\\n-2\\n-3\\n+3_changed"')
def run(self, filename, file, tab_width: int = 2):
    """
    This bear features a simple algorithm to calculate the right
    indentation for Matlab/Octave code. However, it will not handle
    hanging indentation or conditions ranging over several lines yet.

    :param tab_width: Number of spaces per indentation level.
    """
    reindented = list(self.reindent(file, tab_width))
    # Nothing to report when the reindentation is a no-op.
    if reindented == file:
        return

    wholediff = Diff.from_string_arrays(file, reindented)
    for diff in wholediff.split_diff():
        yield Result(
            self,
            'The indentation could be changed to improve readability.',
            severity=RESULT_SEVERITY.INFO,
            affected_code=(diff.range(filename), ),
            diffs={filename: diff})
def test_ask_for_actions_and_apply(self):
    failed_actions = set()
    flaky_action = TestAction()
    noop_action = DoNothingAction()
    args = [self.console_printer,
            Section(''),
            [noop_action.get_metadata(), flaky_action.get_metadata()],
            {'DoNothingAction': noop_action,
             'TestAction': flaky_action},
            failed_actions,
            Result('origin', 'message'),
            {}, {}, {}]

    with simulate_console_inputs('a', 'param1', 'a', 'param2') as generator:
        # First run: the action raises, so it must be recorded as failed.
        flaky_action.apply = unittest.mock.Mock(side_effect=AssertionError)
        ask_for_action_and_apply(*args)
        self.assertEqual(generator.last_input, 1)
        self.assertIn('TestAction', failed_actions)

        # Second run: the action succeeds and the failure record is
        # cleared.
        flaky_action.apply = lambda *args, **kwargs: {}
        ask_for_action_and_apply(*args)
        self.assertEqual(generator.last_input, 3)
        self.assertNotIn('TestAction', failed_actions)
def test_is_applicable(self):
    no_code_result = Result('', '')
    project_wide_result = Result.from_values('', '', '')
    missing_file_result = Result.from_values('', '', 'file')
    invalid_result = ''

    self.assertEqual(OpenEditorAction.is_applicable(no_code_result,
                                                    None, {}),
                     'The result is not associated with any source code.')
    self.assertTrue(OpenEditorAction.is_applicable(project_wide_result,
                                                   None, {}))

    # Check non-existent file
    self.assertEqual(
        OpenEditorAction.is_applicable(missing_file_result, None, {}),
        "The result is associated with source code that doesn't "
        'seem to exist.')

    with self.assertRaises(TypeError):
        OpenEditorAction.is_applicable(invalid_result, None, {})
def run(self, filename, file,
        use_spaces: bool = True,
        tab_width: int = SpacingHelper.DEFAULT_TAB_WIDTH,
        max_line_length: int = 80,
        use_parentheses_in_import: bool = True,
        sort_imports_by_length: bool = False,
        isort_multi_line_output: int = 4):
    """
    Sorts imports for python.

    :param use_spaces:                True if spaces are to be used
                                      instead of tabs.
    :param tab_width:                 Number of spaces per indent level.
    :param max_line_length:           Maximum number of characters for a
                                      line.
    :param use_parentheses_in_import: True if parenthesis are to be used
                                      in import statements.
    :param sort_imports_by_length:    Set to true to sort imports by length
                                      instead of alphabetically.
    :param isort_multi_line_output:   The type of formatting to be used by
                                      isort when indenting imports. This
                                      value if passed to isort as the
                                      `multi_line_output` setting.
    """
    # isort takes the literal string "Tab" for tab indentation, otherwise
    # the number of spaces per level.  `not use_spaces` replaces the
    # unidiomatic `use_spaces == False` comparison of the original.
    indent = "Tab" if not use_spaces else tab_width
    new_file = tuple(
        SortImports(
            file_contents=''.join(file),
            line_length=max_line_length,
            indent=indent,
            multi_line_output=isort_multi_line_output,
            use_parentheses=use_parentheses_in_import,
            length_sort=sort_imports_by_length).output.splitlines(True))
    if new_file != tuple(file):
        diff = Diff.from_string_arrays(file, new_file)
        yield Result(self,
                     "Imports can be sorted.",
                     affected_code=diff.affected_code(filename),
                     diffs={filename: diff})
def process_output_corrected(self, output, filename, file,
                             diff_severity=RESULT_SEVERITY.NORMAL,
                             result_message="Inconsistency found.",
                             diff_distance=1):
    """
    Processes the executable's output as a corrected file.

    :param output:         The output of the program. This can be either a
                           single string or a sequence of strings.
    :param filename:       The filename of the file currently being
                           corrected.
    :param file:           The contents of the file currently being
                           corrected.
    :param diff_severity:  The severity to use for generating results.
    :param result_message: The message to use for generating results.
    :param diff_distance:  Number of unchanged lines that are allowed in
                           between two changed lines so they get yielded
                           as one diff. If a negative distance is given,
                           every change will be yielded as an own diff,
                           even if they are right beneath each other.
    :return:               An iterator returning results containing
                           patches for the file to correct.
    """
    # Normalize a bare string to a one-element sequence.
    if isinstance(output, str):
        output = (output, )

    for string in output:
        corrected_lines = string.splitlines(keepends=True)
        split = Diff.from_string_arrays(file, corrected_lines).split_diff(
            distance=diff_distance)
        for diff in split:
            yield Result(self,
                         result_message,
                         affected_code=diff.affected_code(filename),
                         diffs={filename: diff},
                         severity=diff_severity)
def test_nostdin_nostderr_noconfig_correction(self):
    create_arguments_mock = Mock()

    class Handler:

        @staticmethod
        def create_arguments(filename, file, config_file):
            # Record the call so the invocation can be asserted below.
            create_arguments_mock(filename, file, config_file)
            return self.test_program_path, "--correct", filename

    uut = (linter(sys.executable,
                  output_format="corrected",
                  diff_severity=RESULT_SEVERITY.INFO,
                  result_message="Custom message")
           (Handler)
           (self.section, None))

    results = list(uut.run(self.testfile_path, self.testfile_content))

    expected_correction = [s + "\n"
                           for s in ["+", "-", "*", "++", "-", "-", "+"]]
    diffs = list(Diff.from_string_arrays(self.testfile_content,
                                         expected_correction).split_diff())

    expected = [Result(uut, "Custom message",
                       affected_code=(
                           SourceRange.from_values(self.testfile_path, 4),
                           SourceRange.from_values(self.testfile_path, 6)),
                       severity=RESULT_SEVERITY.INFO,
                       diffs={self.testfile_path: diffs[0]}),
                Result.from_values(uut, "Custom message",
                                   self.testfile_path,
                                   10, None, 10, None,
                                   RESULT_SEVERITY.INFO,
                                   diffs={self.testfile_path: diffs[1]})]

    self.assertEqual(results, expected)
    create_arguments_mock.assert_called_once_with(
        self.testfile_path, self.testfile_content, None)
def run(self, dependency_results, appveyor_ci: bool = False, **kwargs):
    """
    Most CI allow commits to skip CI build by including sequences like
    [skip ci] or [ci skip] anywhere in the commit message. AppVeyor CI
    supports [skip appveyor] in addition to the above but only in the
    commit title.

    This bear checks the HEAD commit to see if it disables CI build and
    return result accordingly.

    Supported CI include AppVeyor, Bitrise, Circle CI, GitLab CI,
    Scrutinizer, Semaphore, Shippable, Travis CI and wercker.

    :param appveyor_ci: Whether AppVeyor is used by the project or not.
    """
    # Extend the pattern in a local variable.  The original mutated
    # ``self.SKIP_CI_REGEX`` in place, so the pattern grew by another
    # ``|\[skip appveyor\]`` on every call with ``appveyor_ci`` enabled.
    skip_ci_regex = self.SKIP_CI_REGEX
    if appveyor_ci:
        skip_ci_regex += r'|\[skip appveyor\]'

    for result in dependency_results[GitCommitMetadataBear.name]:
        if appveyor_ci:
            # AppVeyor only honours the marker in the commit title
            # (the text before the first newline).
            pos = result.raw_commit_message.find('\n')
            commit_title = (result.raw_commit_message[:pos]
                            if pos != -1 else result.raw_commit_message)
        else:
            commit_title = result.raw_commit_message

        match = re.search(skip_ci_regex, commit_title)
        if not match:
            continue

        all_files = (result.modified_files + result.added_files +
                     result.deleted_files)
        for file in all_files:
            for pattern in self.section['files']:
                if file not in glob.glob(pattern):
                    continue
                yield Result(
                    self,
                    'This commit modifies a file that has '
                    'pattern of type "%s", thus should '
                    'not disable CI build.' % pattern)
def run(self, filename, file, language: str, coalang_dir: str = None):
    """
    Finds out all the positions of strings and comments in a file.
    The Bear searches for valid comments and strings and yields their
    ranges as SourceRange objects in HiddenResults.

    :param language:    Language to be whose annotations are to be
                        searched.
    :param coalang_dir: external directory for coalang file.
    :return:            HiddenResults containing a dictionary with keys as
                        'strings' or 'comments' and values as a tuple of
                        SourceRanges of strings and a tuple of
                        SourceRanges of comments respectively.
    """
    try:
        lang_dict = LanguageDefinition(language, coalang_dir=coalang_dir)
    except FileNotFoundError:
        # Without a coalang spec nothing can be analysed; report and bail.
        content = ("coalang specification for " + language +
                   " not found.")
        yield HiddenResult(self, content)
        return

    string_delimiters = dict(lang_dict["string_delimiters"])
    multiline_string_delimiters = dict(
        lang_dict["multiline_string_delimiters"])
    multiline_comment_delimiters = dict(
        lang_dict["multiline_comment_delimiters"])
    comment_delimiter = dict(lang_dict["comment_delimiter"])

    # Defaults in case the search below raises before assigning.
    string_ranges = comment_ranges = ()
    try:
        string_ranges, comment_ranges = self.find_annotation_ranges(
            file,
            filename,
            string_delimiters,
            multiline_string_delimiters,
            comment_delimiter,
            multiline_comment_delimiters)
    except NoCloseError as e:
        yield Result(self, str(e),
                     severity=RESULT_SEVERITY.MAJOR,
                     affected_code=(e.code, ))

    content = {"strings": string_ranges, "comments": comment_ranges}
    yield HiddenResult(self, content)
def run(self, filename, file,
        radon_ranks_info: typed_list(str) = (),
        radon_ranks_normal: typed_list(str) = ('C', 'D'),
        radon_ranks_major: typed_list(str) = ('E', 'F'),
        ):
    """
    Uses radon to compute complexity of a given file.

    :param radon_ranks_info:   The ranks (given by radon) to treat as
                               severity INFO.
    :param radon_ranks_normal: The ranks (given by radon) to treat as
                               severity NORMAL.
    :param radon_ranks_major:  The ranks (given by radon) to treat as
                               severity MAJOR.
    """
    severity_map = {RESULT_SEVERITY.INFO: radon_ranks_info,
                    RESULT_SEVERITY.NORMAL: radon_ranks_normal,
                    RESULT_SEVERITY.MAJOR: radon_ranks_major}

    for visitor in radon.complexity.cc_visit(''.join(file)):
        rank = radon.complexity.cc_rank(visitor.complexity)

        # Map the radon rank onto a coala severity; unranked visitors
        # are skipped.
        severity = None
        for result_severity, rank_list in severity_map.items():
            if rank in rank_list:
                severity = result_severity
        if severity is None:
            continue

        col = visitor.col_offset if visitor.col_offset else None
        visitor_range = SourceRange.from_values(filename,
                                                visitor.lineno,
                                                col,
                                                visitor.endline)
        message = '{} has a cyclomatic complexity of {}'.format(
            visitor.name, rank)

        yield Result(self, message,
                     severity=severity,
                     affected_code=(visitor_range, ))
def test_autoapply_override(self):
    """
    Tests that the default_actions aren't automatically applied when the
    autoapply setting overrides that.
    """
    self.section.append(Setting('default_actions',
                                'somebear: PrintDebugMessageAction'))

    # Verify that it would apply the action, i.e. remove the result
    results = [5,
               HiddenResult('origin', []),
               Result('somebear', 'message', debug_msg='debug')]
    retval, newres = print_result(results, {}, 0, lambda *args: None,
                                  self.section, self.log_printer, {}, [])
    self.assertEqual(newres, [])

    # Override and verify that result is unprocessed, i.e. not gone
    self.section.append(Setting('autoapply', 'false'))
    retval, newres = print_result(results, {}, 0, lambda *args: None,
                                  self.section, self.log_printer, {}, [])
    self.assertNotEqual(newres, [])
def retrieve_results(self, filename, file, **kwargs):
    """
    Yields results using the self.GET_REPLACEMENT function.

    :param filename: The filename, just pass it over as you got it!
    :param file:     The file, just pass it over as you got it!
    :param kwargs:   Any keyword arguments that will be passed to the
                     GET_REPLACEMENT function. Please provide cli_options
                     if you don't override the default.
    """
    new_file, errors = self.GET_REPLACEMENT(file=file, **kwargs)
    self.__print_errors(errors)

    for diff in self.__yield_diffs(file, new_file):
        yield Result(self,
                     self.RESULT_MESSAGE,
                     affected_code=(diff.range(filename),),
                     diffs={filename: diff},
                     severity=self.SEVERITY)
def test_is_applicable(self):
    # A non-Result argument is a type error.
    with self.assertRaises(TypeError):
        IgnoreResultAction.is_applicable('str', {}, {})

    self.assertEqual(
        IgnoreResultAction.is_applicable(
            Result.from_values('origin', 'msg', "file doesn't exist", 2),
            {}, {}),
        "The result is associated with source code that doesn't "
        'seem to exist.')

    self.assertEqual(
        IgnoreResultAction.is_applicable(Result('', ''), {}, {}),
        'The result is not associated with any source code.')

    with make_temp() as f_a:
        self.assertTrue(
            IgnoreResultAction.is_applicable(
                Result.from_values('origin', 'msg', f_a, 2), {}, {}))
def test_string_dict(self):
    # A bare Result stringifies with empty/default fields.
    uut = Result(None, '')
    self.assertEqual(uut.to_string_dict(),
                     {'id': str(uut.id),
                      'origin': '',
                      'message': '',
                      'file': '',
                      'line_nr': '',
                      'severity': 'NORMAL',
                      'debug_msg': '',
                      'additional_info': '',
                      'confidence': '100'})

    # A fully populated Result stringifies every field.
    uut = Result.from_values(origin='origin',
                             message='msg',
                             file='file',
                             line=2,
                             severity=RESULT_SEVERITY.INFO,
                             additional_info='hi!',
                             debug_msg='dbg',
                             confidence=50)
    self.assertEqual(uut.to_string_dict(),
                     {'id': str(uut.id),
                      'origin': 'origin',
                      'message': 'msg',
                      'file': abspath('file'),
                      'line_nr': '2',
                      'severity': 'INFO',
                      'debug_msg': 'dbg',
                      'additional_info': 'hi!',
                      'confidence': '50'})

    uut = Result.from_values(origin='o', message='m', file='f', line=5)
    self.assertEqual(uut.to_string_dict()['line_nr'], '5')
def test_string_dict(self):
    # Default Result: all fields stringify to empty/default values.
    uut = Result(None, "")
    self.assertEqual(uut.to_string_dict(),
                     {"id": str(uut.id),
                      "origin": "",
                      "message": "",
                      "file": "",
                      "line_nr": "",
                      "severity": "NORMAL",
                      "debug_msg": "",
                      "additional_info": "",
                      "confidence": "100"})

    # Fully populated Result: every field is stringified.
    uut = Result.from_values(origin="origin",
                             message="msg",
                             file="file",
                             line=2,
                             severity=RESULT_SEVERITY.INFO,
                             additional_info="hi!",
                             debug_msg="dbg",
                             confidence=50)
    self.assertEqual(uut.to_string_dict(),
                     {"id": str(uut.id),
                      "origin": "origin",
                      "message": "msg",
                      "file": abspath("file"),
                      "line_nr": "2",
                      "severity": "INFO",
                      "debug_msg": "dbg",
                      "additional_info": "hi!",
                      "confidence": "50"})

    uut = Result.from_values(origin="o", message="m", file="f", line=5)
    self.assertEqual(uut.to_string_dict()["line_nr"], "5")
def run(self, filename, file):
    """
    Checks for vulnerable package versions in requirements files.
    """
    # Only exactly-pinned requirements (single '==' spec) can be checked.
    packages = [Package(key=req.key, version=req.specs[0][1])
                for req in self.try_parse_requirements(file)
                if len(req.specs) == 1 and req.specs[0][0] == '==']

    if not packages:
        return

    for vulnerability in safety.check(packages=packages):
        if vulnerability.is_cve:
            message_template = (
                '{vuln.name}{vuln.spec} is vulnerable to {vuln.cve_id} '
                'and your project is using {vuln.version}.')
        else:
            message_template = (
                '{vuln.name}{vuln.spec} is vulnerable and your project is '
                'using {vuln.version}.')

        # StopIteration should not ever happen so skipping its branch
        line_number, line = next(  # pragma: no branch
            (index, line)
            for index, line in enumerate(file, start=1)
            if vulnerability.name in line)
        version_spec_match = re.search(r'[=<>]+(\S+?)(?:$|\s|#)', line)
        source_range = SourceRange.from_values(
            filename,
            line_number,
            version_spec_match.start(1) + 1,
            line_number,
            version_spec_match.end(1) + 1,
        )

        yield Result(
            self,
            message_template.format(vuln=vulnerability),
            additional_info=vulnerability.data['advisory'],
            affected_code=(source_range, ),
        )
def run(self):
    """
    Looks for missing __init__.py files in directories containing python
    files.
    """
    # Directories that contain at least one ``.py`` file.  Iterating the
    # dict directly replaces the redundant ``.keys()`` call.
    python_dirs = {
        os.path.split(filename)[0]
        for filename in self.file_dict
        if filename.endswith('.py')
    }

    # ``x not in y`` replaces the unidiomatic ``not x in y`` test.
    missing_inits = {
        directory
        for directory in python_dirs
        if os.path.join(directory, '__init__.py') not in self.file_dict
    }

    for missing_init_dir in missing_inits:
        yield Result(
            self,
            'Directory "{}" does not contain __init__.py file'.format(
                os.path.relpath(missing_init_dir, self.get_config_dir())))