def test_combined_strings(self):
    file_text = ['"some string #with comment"\n',
                 '"""\n',
                 "now a multiline string ''' <- this one not\n",
                 '"""\n',
                 '"""\n'
                 'another comment # rather harmless\n',
                 '"""\n']
    string1_start = 0
    string1_end = len(file_text[0]) - 2
    string1 = SourceRange.from_absolute_position(
        "F",
        AbsolutePosition(file_text, string1_start),
        AbsolutePosition(file_text, string1_end))

    string2_start = string1_end+2
    text = ''.join(file_text)
    string2_end = text.find('"""', string2_start + 1) + 2  # +2 for length of """
    string2 = SourceRange.from_absolute_position(
        "F",
        AbsolutePosition(file_text, string2_start),
        AbsolutePosition(file_text, string2_end))

    string3_start = text.find('"""', string2_end + 1)
    string3_end = text.find('"""', string3_start + 1) + 2  # +2 for length of """
    string3 = SourceRange.from_absolute_position(
        "F",
        AbsolutePosition(file_text, string3_start),
        AbsolutePosition(file_text, string3_end))

    with execute_bear(self.python_uut, "F", file_text) as results:
        self.assertIn(string1, results[0].contents['strings'])
        self.assertIn(string2, results[0].contents['strings'])
        self.assertIn(string3, results[0].contents['strings'])
        self.assertEqual(results[0].contents['comments'], ())
def test_ignore_results(self):
    ranges = [([], SourceRange.from_values('f', 1, 1, 2, 2))]
    result = Result.from_values('origin (Something Specific)',
                                'message',
                                file='e',
                                line=1,
                                column=1,
                                end_line=2,
                                end_column=2)

    self.assertFalse(check_result_ignore(result, ranges))

    ranges.append(([], SourceRange.from_values('e', 2, 3, 3, 3)))
    self.assertFalse(check_result_ignore(result, ranges))

    ranges.append(([], SourceRange.from_values('e', 1, 1, 2, 2)))
    self.assertTrue(check_result_ignore(result, ranges))

    result1 = Result.from_values('origin', 'message', file='e')
    self.assertTrue(check_result_ignore(result1, ranges))

    ranges = [(['something', 'else', 'not origin'],
               SourceRange.from_values('e', 1, 1, 2, 2))]
    self.assertFalse(check_result_ignore(result, ranges))

    ranges = [(['something', 'else', 'origin'],
               SourceRange.from_values('e', 1, 1, 2, 2))]
    self.assertTrue(check_result_ignore(result, ranges))
def yield_ignore_ranges(file_dict):
    """
    Yields tuples of affected bears and a SourceRange that shall be ignored
    for those.

    :param file_dict: The file dictionary.
    """
    for filename, file in file_dict.items():
        start = None
        bears = []
        for line_number, line in enumerate(file, start=1):
            line = line.lower()
            if "start ignoring " in line:
                start = line_number
                bears = get_ignore_scope(line, "start ignoring ")
            elif "stop ignoring" in line:
                if start:
                    yield (bears,
                           SourceRange.from_values(filename,
                                                   start,
                                                   end_line=line_number))
            elif "ignore " in line:
                yield (get_ignore_scope(line, "ignore "),
                       SourceRange.from_values(filename,
                                               line_number,
                                               end_line=line_number+1))
def test_ignore_glob(self): result = Result.from_values("LineLengthBear", "message", file="d", line=1, column=1, end_line=2, end_column=2) ranges = [(["(line*|space*)", "py*"], SourceRange.from_values("d", 1, 1, 2, 2))] self.assertTrue(check_result_ignore(result, ranges)) result = Result.from_values("SpaceConsistencyBear", "message", file="d", line=1, column=1, end_line=2, end_column=2) ranges = [(["(line*|space*)", "py*"], SourceRange.from_values("d", 1, 1, 2, 2))] self.assertTrue(check_result_ignore(result, ranges)) result = Result.from_values("XMLBear", "message", file="d", line=1, column=1, end_line=2, end_column=2) ranges = [(["(line*|space*)", "py*"], SourceRange.from_values("d", 1, 1, 2, 2))] self.assertFalse(check_result_ignore(result, ranges))
def range(self, filename):
    """
    Calculates a SourceRange spanning over the whole Diff. If something is
    added after the 0th line (i.e. before the first line) the first line
    will be included in the SourceRange.

    The range of an empty diff will only affect the filename:

    >>> range = Diff([]).range("file")
    >>> range.file is None
    False
    >>> print(range.start.line)
    None

    :param filename: The filename to associate the SourceRange with.
    :return:         A SourceRange object.
    """
    if len(self._changes) == 0:
        return SourceRange.from_values(filename)

    start = min(self._changes.keys())
    end = max(self._changes.keys())
    return SourceRange.from_values(filename,
                                   start_line=max(1, start),
                                   end_line=max(1, end))
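# A minimal usage sketch (not part of the original source) showing how
# ``range`` collapses the first and last changed lines of a Diff into one
# SourceRange. ``Diff.from_string_arrays`` is taken from this document; the
# import paths below are assumed to follow coala's package layout.
from coalib.results.Diff import Diff

before = ['a\n', 'b\n', 'c\n', 'd\n']
after = ['a\n', 'B\n', 'c\n', 'D\n']

diff = Diff.from_string_arrays(before, after)
whole_range = diff.range('some_file')
# Lines 2 and 4 were changed, so the combined range should span them.
print(whole_range.start.line, whole_range.end.line)  # expected: 2 4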
def setUp(self):
    self.section = Section('')
    self.uut = QuotesBear(self.section, Queue())

    self.double_quote_file = dedent("""
        '''
        Multiline string
        '''
        "a string with double quotes!"
        'A single quoted string with " in it'
        """).splitlines(True)

    self.single_quote_file = dedent("""
        '''
        Multiline string
        '''
        'a string with single quotes!'
        "A double quoted string with ' in it"
        """).splitlines(True)

    self.filename = 'f'

    self.dep_results = {
        'AnnotationBear': [HiddenResult(
            'AnnotationBear',
            {'comments': (),
             'strings': (SourceRange.from_values(self.filename, 2, 1, 4, 3),
                         SourceRange.from_values(self.filename, 5, 1, 5, 30),
                         SourceRange.from_values(self.filename, 6, 1, 6, 37))}
        )]
    }
def test_settings(self):
    uut = (external_bear_wrap(sys.executable, settings={
               'set_normal_severity': ('', bool),
               'set_sample_dbg_msg': ('', bool, False),
               'not_set_different_msg': ('', bool, True)})
           (self.TestBear)
           (self.section, None))

    results = list(uut.run(self.testfile_path,
                           self.testfile_content,
                           set_normal_severity=False,
                           set_sample_dbg_msg=True,
                           not_set_different_msg=False))

    expected = [
        Result(
            origin=uut,
            message='This is wrong',
            affected_code=(SourceRange.from_values(self.testfile_path, 1),),
            severity=RESULT_SEVERITY.MAJOR,
            debug_msg='Sample debug message'),
        Result(
            origin=uut,
            message='Different message',
            affected_code=(SourceRange.from_values(self.testfile_path, 3),),
            severity=RESULT_SEVERITY.INFO)]

    self.assertEqual(results, expected)
def test_ignore_glob(self):
    result = Result.from_values('LineLengthBear', 'message', file='d',
                                line=1, column=1, end_line=2, end_column=2)
    ranges = [(['(line*|space*)', 'py*'],
               SourceRange.from_values('d', 1, 1, 2, 2))]
    self.assertTrue(check_result_ignore(result, ranges))

    result = Result.from_values('SpaceConsistencyBear', 'message', file='d',
                                line=1, column=1, end_line=2, end_column=2)
    ranges = [(['(line*|space*)', 'py*'],
               SourceRange.from_values('d', 1, 1, 2, 2))]
    self.assertTrue(check_result_ignore(result, ranges))

    result = Result.from_values('XMLBear', 'message', file='d',
                                line=1, column=1, end_line=2, end_column=2)
    ranges = [(['(line*|space*)', 'py*'],
               SourceRange.from_values('d', 1, 1, 2, 2))]
    self.assertFalse(check_result_ignore(result, ranges))
def test_keyword_diff(self):
    text = ['# todo 123\n']
    comments = [SourceRange.from_values('F', 1, 1, 1, 10)]
    dep_results = {
        'AnnotationBear': [
            self.annotation_bear_result_type({'comments': comments})
        ]
    }
    with execute_bear(self.uut, 'F', text,
                      dependency_results=dep_results) as result:
        self.assertEqual(result[0].diffs['F'].unified_diff,
                         '--- \n'
                         '+++ \n'
                         '@@ -1 +0,0 @@\n'
                         '-# todo 123\n')

    text = ['test = 55 # todo 123\n']
    comments = [SourceRange.from_values('F', 1, 11, 1, 23)]
    dep_results = {
        'AnnotationBear': [
            self.annotation_bear_result_type({'comments': comments})
        ]
    }
    with execute_bear(self.uut, 'F', text,
                      dependency_results=dep_results) as result:
        self.assertEqual(result[0].diffs['F'].unified_diff,
                         '--- \n'
                         '+++ \n'
                         '@@ -1 +1 @@\n'
                         '-test = 55 # todo 123\n'
                         '+test = 55\n')
def test_overlaps(self):
    a = SourceRange.from_values('test_file', 2, None, 3)
    b = SourceRange.from_values('test_file', 3, None, 5)
    self.assertTrue(a.overlaps(b))
    self.assertTrue(b.overlaps(a))

    a = SourceRange.from_values('test_file1', 2, None, 3)
    b = SourceRange.from_values('test_file2', 3, None, 5)
    self.assertFalse(a.overlaps(b))
    self.assertFalse(b.overlaps(a))

    a = SourceRange.from_values('test_file', 2, None, 2, None)
    b = SourceRange.from_values('test_file', 2, 2, 2, 80)
    self.assertTrue(a.overlaps(b))
    self.assertTrue(b.overlaps(a))

    a = SourceRange.from_values('test_file1', 1, None, None, None)
    b = SourceRange.from_values('test_file2', 1, None, 1, None)
    self.assertFalse(a.overlaps(b))
    self.assertFalse(b.overlaps(a))

    a = SourceRange.from_values('test_file', 1, None, None, None)
    b = SourceRange.from_values('test_file', 1, None, 1, None)
    self.assertTrue(a.overlaps(b))
    self.assertTrue(b.overlaps(a))
def test_overlaps(self): overlapping_range = SourceRange.from_values("file1", 1, 1, 2, 2) nonoverlapping_range = SourceRange.from_values("file2", 1, 1, 2, 2) uut = Result.from_values("origin", "message", file="file1", line=1, column=1, end_line=2, end_column=2) self.assertTrue(uut.overlaps(overlapping_range)) self.assertTrue(uut.overlaps([overlapping_range])) self.assertFalse(uut.overlaps(nonoverlapping_range))
def test_ignore_results(self): ranges = [([], SourceRange.from_values("f", 1, 1, 2, 2))] result = Result.from_values("origin", "message", file="e", line=1, column=1, end_line=2, end_column=2) self.assertFalse(check_result_ignore(result, ranges)) ranges.append(([], SourceRange.from_values("e", 2, 3, 3, 3))) self.assertFalse(check_result_ignore(result, ranges)) ranges.append(([], SourceRange.from_values("e", 1, 1, 2, 2))) self.assertTrue(check_result_ignore(result, ranges)) result1 = Result.from_values("origin", "message", file="e") self.assertFalse(check_result_ignore(result1, ranges)) ranges = [(['something', 'else', 'not origin'], SourceRange.from_values("e", 1, 1, 2, 2))] self.assertFalse(check_result_ignore(result, ranges)) ranges = [(['something', 'else', 'origin'], SourceRange.from_values("e", 1, 1, 2, 2))] self.assertTrue(check_result_ignore(result, ranges))
def test_print_results_multiple_ranges(self):
    affected_code = (
        SourceRange.from_values("some_file", 5, end_line=7),
        SourceRange.from_values("another_file", 1, 3, 1, 5),
        SourceRange.from_values("another_file", 3, 3, 3, 5))

    with retrieve_stdout() as stdout:
        print_results(
            self.log_printer,
            Section(""),
            [Result("ClangCloneDetectionBear", "Clone Found", affected_code)],
            {abspath("some_file"): ["line " + str(i + 1) + "\n"
                                    for i in range(10)],
             abspath("another_file"): ["line " + str(i + 1) + "\n"
                                       for i in range(10)]},
            {},
            color=False)
        self.assertEqual("""
another_file
| 1| line 1
another_file
| 3| line 3
some_file
| 5| line 5
| 6| line 6
| 7| line 7
| | [NORMAL] ClangCloneDetectionBear:
| | Clone Found
""",
                         stdout.getvalue())
def test_overlaps(self):
    overlapping_range = SourceRange.from_values('file1', 1, 1, 2, 2)
    nonoverlapping_range = SourceRange.from_values('file2', 1, 1, 2, 2)
    uut = Result.from_values('origin', 'message', file='file1',
                             line=1, column=1, end_line=2, end_column=2)

    self.assertTrue(uut.overlaps(overlapping_range))
    self.assertTrue(uut.overlaps([overlapping_range]))
    self.assertFalse(uut.overlaps(nonoverlapping_range))

    overlapping_range = SourceRange.from_values('file1', 1, None, 1, None)
    nonoverlapping_range = SourceRange.from_values(
        'file2', 1, None, 1, None)
    uut = Result.from_values('origin', 'message', file='file1',
                             line=1, column=1, end_line=1, end_column=20)

    self.assertTrue(uut.overlaps(overlapping_range))
    self.assertTrue(uut.overlaps([overlapping_range]))
    self.assertFalse(uut.overlaps(nonoverlapping_range))
def test_no_overlap(self):
    uut1 = SourceRange.from_values('file', 2, None, 3)
    uut2 = SourceRange.from_values('file', 4, None, 5)
    self.assertFalse(uut1.overlaps(uut2))
    self.assertFalse(uut2.overlaps(uut1))

    uut1 = SourceRange.from_values('file', 2, None, 3, 6)
    uut2 = SourceRange.from_values('file', 3, 7, 5)
    self.assertFalse(uut1.overlaps(uut2))
    self.assertFalse(uut2.overlaps(uut1))
def test_construction(self):
    uut1 = SourceRange(self.result_fileA_noline)
    self.assertEqual(uut1.end, self.result_fileA_noline)

    uut2 = SourceRange.from_values("A")
    self.assertEqual(uut1, uut2)

    uut = SourceRange.from_values("B", start_line=2, end_line=4)
    self.assertEqual(uut.start, self.result_fileB_line2)
    self.assertEqual(uut.end, self.result_fileB_line4)
def yield_ignore_ranges(file_dict):
    """
    Yields tuples of affected bears and a SourceRange that shall be ignored
    for those.

    :param file_dict: The file dictionary.
    """
    for filename, file in file_dict.items():
        start = None
        bears = []
        stop_ignoring = False

        # Do not process raw files
        if file is None:
            continue

        for line_number, line in enumerate(file, start=1):
            # Before lowering all lines ever read, first look for the
            # biggest common substring, case sensitive:
            # I*gnor*e, start i*gnor*ing, N*oqa*.
            if 'gnor' in line or 'oqa' in line:
                line = line.lower()
                if 'start ignoring ' in line:
                    start = line_number
                    bears = get_ignore_scope(line, 'start ignoring ')
                elif 'stop ignoring' in line:
                    stop_ignoring = True
                    if start:
                        yield (bears,
                               SourceRange.from_values(
                                   filename,
                                   start, 1,
                                   line_number,
                                   len(file[line_number-1])))
                else:
                    for ignore_stmt in ['ignore ', 'noqa ', 'noqa']:
                        if ignore_stmt in line:
                            end_line = min(line_number + 1, len(file))
                            yield (get_ignore_scope(line, ignore_stmt),
                                   SourceRange.from_values(
                                       filename,
                                       line_number, 1,
                                       end_line,
                                       len(file[end_line-1])))
                            break

        if stop_ignoring is False and start is not None:
            yield (bears,
                   SourceRange.from_values(filename,
                                           start, 1,
                                           len(file),
                                           len(file[-1])))
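# Hypothetical driver (not part of the original source) illustrating the
# shape of the tuples this generator yields for an inline ignore comment.
# The import path and the exact bear-name normalization are assumptions
# based on coala's layout; treat the expected output as approximate.
from coalib.processes.Processing import yield_ignore_ranges

file_dict = {'demo.py': ['x = 1  # Ignore PEP8Bear\n',
                         'y = 2\n']}

for bears, ignore_range in yield_ignore_ranges(file_dict):
    print(bears, ignore_range.start.line, ignore_range.end.line)
    # expected roughly: ['pep8bear'] 1 2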
def test_renamed_file(self): src_range = SourceRange(SourcePosition("test_file")) self.assertEqual(src_range.renamed_file({}), abspath('test_file')) self.assertEqual( src_range.renamed_file({abspath('test_file'): Diff([])}), abspath('test_file')) self.assertEqual( src_range.renamed_file( {abspath('test_file'): Diff([], rename='another_file')}), 'another_file')
def test_from_absolute_position(self): text = ("a\n", "b\n") start = AbsolutePosition(text, 0) end = AbsolutePosition(text, 2) uut = SourceRange.from_absolute_position("F", start, end) compare = SourceRange.from_values("F", 1, 1, 2, 1) self.assertEqual(uut, compare) uut = SourceRange.from_absolute_position("F", start, None) compare = SourceRange(SourcePosition("F", 1, 1), None) self.assertEqual(uut, compare)
def test_from_clang_range(self):
    # Simulating a clang SourceRange is easier than setting one up without
    # actually parsing a complete C file.
    ClangRange = namedtuple('ClangRange', 'start end')
    ClangPosition = namedtuple('ClangPosition', 'file line column')
    ClangFile = namedtuple('ClangFile', 'name')
    file = ClangFile('t.c')
    start = ClangPosition(file, 1, 2)
    end = ClangPosition(file, 3, 4)

    uut = SourceRange.from_clang_range(ClangRange(start, end))
    compare = SourceRange.from_values('t.c', 1, 2, 3, 4)
    self.assertEqual(uut, compare)
def test_from_clang_range(self):
    # Simulating a clang SourceRange is easier than setting one up without
    # actually parsing a complete C file.
    ClangRange = namedtuple("ClangRange", "start end")
    ClangPosition = namedtuple("ClangPosition", "file line column")
    ClangFile = namedtuple("ClangFile", "name")
    file = ClangFile("t.c".encode())
    start = ClangPosition(file, 1, 2)
    end = ClangPosition(file, 3, 4)

    uut = SourceRange.from_clang_range(ClangRange(start, end))
    compare = SourceRange.from_values("t.c", 1, 2, 3, 4)
    self.assertEqual(uut, compare)
def test_location_repr(self): result_a = Result(origin="o", message="m") self.assertEqual(result_a.location_repr(), "the whole project") result_b = Result.from_values("o", "m", file="e") self.assertEqual(result_b.location_repr(), "'e'") affected_code = (SourceRange.from_values("f"), SourceRange.from_values("g")) result_c = Result("o", "m", affected_code=affected_code) self.assertEqual(result_c.location_repr(), "'f', 'g'") affected_code = (SourceRange.from_values("f"), SourceRange.from_values("f")) result_d = Result("o", "m", affected_code=affected_code) self.assertEqual(result_d.location_repr(), "'f'")
def get_language_tool_results(filename, file_contents, locale):
    joined_text = "".join(file_contents)
    locale = guess_language(joined_text) if locale == 'auto' else locale
    locale = 'en-US' if not locale else locale

    tool = LanguageTool(locale)
    matches = tool.check(joined_text)
    for match in matches:
        if not match.replacements:
            diffs = None
        else:
            replaced = correct(joined_text, [match]).splitlines(True)
            diffs = {filename: Diff.from_string_arrays(file_contents,
                                                       replaced)}

        rule_id = match.ruleId
        if match.subId is not None:
            rule_id += '[{}]'.format(match.subId)

        message = match.msg + ' (' + rule_id + ')'
        yield (message, diffs,
               SourceRange.from_values(filename,
                                       match.fromy+1,
                                       match.fromx+1,
                                       match.toy+1,
                                       match.tox+1))
def run(self, filename, file,
        network_timeout: typed_dict(str, int, DEFAULT_TIMEOUT)=dict(),
        link_ignore_regex: str='([.\/]example\.com|\{|\$)',
        link_ignore_list: typed_list(str)=''):
    """
    Find links in any text file.

    Warning: This bear will make HEAD requests to all URLs mentioned in
    your codebase, which can potentially be destructive. As an example,
    this bear would naively just visit the URL from a line that goes like
    `do_not_ever_open = 'https://api.acme.inc/delete-all-data'` wiping out
    all your data.

    :param network_timeout:   A dict mapping URLs and timeout to be used
                              for that URL. All the URLs that have the same
                              host as that of URLs provided will be passed
                              that timeout. It can also contain a wildcard
                              timeout entry with key '*'. The timeout of
                              all the websites not in the dict will be the
                              value of the key '*'.
    :param link_ignore_regex: A regex for urls to ignore.
    :param link_ignore_list:  Comma separated url globs to ignore
    """
    network_timeout = {urlparse(url).netloc if not url == '*' else '*':
                       timeout
                       for url, timeout in network_timeout.items()}

    for line_number, link, code, context in self.analyze_links_in_file(
            file, network_timeout, link_ignore_regex, link_ignore_list):
        affected_code = SourceRange.from_values(filename, line_number)

        yield URLResult(self, (affected_code,), link, code, context)
def test_location_repr(self):
    result_a = Result(origin='o', message='m')
    self.assertEqual(result_a.location_repr(), 'the whole project')

    result_b = Result.from_values('o', 'm', file='e')
    self.assertEqual(result_b.location_repr(), "'e'")

    affected_code = (SourceRange.from_values('f'),
                     SourceRange.from_values('g'))
    result_c = Result('o', 'm', affected_code=affected_code)
    self.assertEqual(result_c.location_repr(), "'f', 'g'")

    affected_code = (SourceRange.from_values('f'),
                     SourceRange.from_values('f'))
    result_d = Result('o', 'm', affected_code=affected_code)
    self.assertEqual(result_d.location_repr(), "'f'")
def get_singleline_comment(file, filename, text, comment, position):
    """
    Gets the SourceRange of a single-line comment, where the start is the
    start of the comment and the end is the end of the line.

    :param file:     A tuple of strings, with each string being a line in
                     the file.
    :param filename: The name of the file.
    :param comment:  The string which specifies the comment.
    :param position: An integer identifying the position where the comment
                     started.
    :return:         A SourceRange object identifying the range of the
                     single-line comment and the end_position of the
                     comment as an integer.
    """
    end_position = get_end_position('\n',
                                    text,
                                    position + len(comment) - 1)
    if end_position == -1:
        end_position = len(text) - 1
    return (SourceRange.from_absolute_position(
                filename,
                AbsolutePosition(file, position),
                AbsolutePosition(file, end_position)),
            end_position)
def test_multiple_ranges(self):
    expected_string = (
        "id:-?[0-9]+:origin:1:.*file:.*another_file:from_line:5:"
        "from_column:3:to_line:5:to_column:5:"
        "severity:1:msg:2\n"
        "id:-?[0-9]+:origin:1:.*file:.*some_file:from_line:5:"
        "from_column:None:to_line:7:to_column:None:"
        "severity:1:msg:2\n")
    affected_code = (SourceRange.from_values("some_file", 5, end_line=7),
                     SourceRange.from_values("another_file", 5, 3, 5, 5))
    with retrieve_stdout() as stdout:
        print_results_formatted(self.logger,
                                self.section,
                                [Result("1", "2", affected_code)],
                                None,
                                None)
        self.assertRegex(stdout.getvalue(), expected_string)
def process_output(self, output, file, filename):
    if not output:  # backwards compatible no results
        return

    outputs = json.loads(output)
    for message_type, values in outputs.items():
        if message_type != 'warnings':
            continue
        for value in values:
            sourceranges = [SourceRange.from_values(
                file=value['file'],
                start_line=value['line'],
                end_line=value['line'])]

            if value['code'] is None:
                message = "'{}': {}".format(
                    value['check_name'], value['message'])
            else:
                message = "'{}' (in '{}'): {}.".format(
                    value['check_name'], value['code'], value['message'])

            yield Result(
                origin='{} ({})'.format(self.__class__.__name__,
                                        value['warning_type']),
                message=message,
                affected_code=sourceranges,
                severity=self.severity_map[value['confidence']],
                additional_info='More information is available at {}'
                                '.'.format(value['link']))
def parse_output(self, out, filename):
    """
    Parses the output JSON into Result objects.

    :param out:      Raw output from the given executable (should be JSON).
    :param filename: The filename of the analyzed file. Needed to create
                     the Result objects.
    :return:         An iterator yielding ``Result`` objects.
    """
    output = json.loads(out)
    for result in output['results']:
        affected_code = tuple(
            SourceRange.from_values(
                code_range['file'],
                code_range['start']['line'],
                code_range['start'].get('column'),
                code_range.get('end', {}).get('line'),
                code_range.get('end', {}).get('column'))
            for code_range in result['affected_code'])
        yield Result(
            origin=result['origin'],
            message=result['message'],
            affected_code=affected_code,
            severity=result.get('severity', 1),
            debug_msg=result.get('debug_msg', ''),
            additional_info=result.get('additional_info', ''))
def run(self, filename, file,
        radon_ranks_info: typed_list(str)=(),
        radon_ranks_normal: typed_list(str)=('C', 'D'),
        radon_ranks_major: typed_list(str)=('E', 'F')):
    """
    Uses radon to compute complexity of a given file.

    :param radon_ranks_info:   The ranks (given by radon) to treat as
                               severity INFO.
    :param radon_ranks_normal: The ranks (given by radon) to treat as
                               severity NORMAL.
    :param radon_ranks_major:  The ranks (given by radon) to treat as
                               severity MAJOR.
    """
    severity_map = {
        RESULT_SEVERITY.INFO: radon_ranks_info,
        RESULT_SEVERITY.NORMAL: radon_ranks_normal,
        RESULT_SEVERITY.MAJOR: radon_ranks_major
    }
    for visitor in radon.complexity.cc_visit("".join(file)):
        rank = radon.complexity.cc_rank(visitor.complexity)
        severity = None
        for result_severity, rank_list in severity_map.items():
            if rank in rank_list:
                severity = result_severity
        if severity is None:
            continue

        visitor_range = SourceRange.from_values(
            filename, visitor.lineno, visitor.col_offset, visitor.endline)
        message = "{} has a cyclomatic complexity of {}".format(
            visitor.name, rank)

        yield Result(self, message, severity=severity,
                     affected_code=(visitor_range,))
def yield_ignore_ranges(file_dict):
    """
    Yields tuples of affected bears and a SourceRange that shall be ignored
    for those.

    :param file_dict: The file dictionary.
    """
    for filename, file in file_dict.items():
        start = None
        bears = []
        stop_ignoring = False
        for line_number, line in enumerate(file, start=1):
            # Before lowering all lines ever read, first look for the
            # biggest common substring, case sensitive:
            # I*gnor*e, start i*gnor*ing.
            if 'gnor' in line:
                line = line.lower()
                if "start ignoring " in line:
                    start = line_number
                    bears = get_ignore_scope(line, "start ignoring ")
                elif "stop ignoring" in line:
                    stop_ignoring = True
                    if start:
                        yield (bears,
                               SourceRange.from_values(
                                   filename,
                                   start, 1,
                                   line_number,
                                   len(file[line_number-1])))
                elif "ignore " in line:
                    end_line = min(line_number + 1, len(file))
                    yield (get_ignore_scope(line, "ignore "),
                           SourceRange.from_values(
                               filename,
                               line_number, 1,
                               end_line,
                               len(file[end_line - 1])))

        if stop_ignoring is False and start is not None:
            yield (bears,
                   SourceRange.from_values(filename,
                                           start, 1,
                                           len(file),
                                           len(file[-1])))
def run(self, filename, file,
        natural_language: str = 'auto',
        languagetool_disable_rules: typed_list(str) = ()):
    """
    Checks the code with LanguageTool.

    :param natural_language:           A locale representing the language
                                       you want to have checked. If set to
                                       'auto' the language is guessed. If
                                       the language cannot be guessed or an
                                       unsupported language is guessed,
                                       'en-US' is used.
    :param languagetool_disable_rules: List of rules to disable checks for.
    """
    # Defer import so the check_prerequisites can be run without
    # language_check being there.
    from language_check import LanguageTool, correct

    joined_text = ''.join(file)
    natural_language = (guess_language(joined_text)
                        if natural_language == 'auto'
                        else natural_language)

    try:
        tool = LanguageTool(natural_language, motherTongue='en_US')
    except ValueError:
        # Using 'en-US' if guessed language is not supported
        logging.warn(
            "Changing the `natural_language` setting to 'en-US' as "
            '`language_check` failed to guess a valid language.')
        natural_language = 'en-US'
        tool = LanguageTool(natural_language, motherTongue='en_US')

    tool.disabled.update(languagetool_disable_rules)

    matches = tool.check(joined_text)
    for match in matches:
        if not match.replacements:
            diffs = None
        else:
            replaced = correct(joined_text, [match]).splitlines(True)
            diffs = {filename: Diff.from_string_arrays(file, replaced)}

        rule_id = match.ruleId
        if match.subId is not None:
            rule_id += '[{}]'.format(match.subId)

        message = match.msg + ' (' + rule_id + ')'
        source_range = SourceRange.from_values(filename,
                                               match.fromy + 1,
                                               match.fromx + 1,
                                               match.toy + 1,
                                               match.tox + 1)
        yield Result(self, message, diffs=diffs,
                     affected_code=(source_range,))
def test_print_affected_files(self):
    with retrieve_stdout() as stdout, \
            make_temp() as some_file:
        file_dict = {some_file: ['1\n', '2\n', '3\n']}
        affected_code = (SourceRange.from_values(some_file),)
        print_affected_files(self.console_printer,
                             self.log_printer,
                             Result('origin',
                                    'message',
                                    affected_code=affected_code),
                             file_dict)
        self.assertEqual(stdout.getvalue(), '')
def test_contained_in_2(self):
    start = SourcePosition('a.py', line=1, column=5)
    end = SourcePosition('a.py', line=5, column=1)
    smaller = SourceRange(start, end)

    start = SourcePosition('a.py', line=1, column=9)
    end = SourcePosition('a.py', line=5, column=1)
    bigger = SourceRange(start, end)
    self.assertFalse(contained_in(smaller, bigger))

    start = SourcePosition('a.py', line=1, column=6)
    end = SourcePosition('a.py', line=4, column=2)
    bigger = SourceRange(start, end)
    self.assertFalse(contained_in(smaller, bigger))

    start = SourcePosition('b.py', line=1, column=5)
    end = SourcePosition('b.py', line=5, column=1)
    bigger = SourceRange(start, end)
    self.assertFalse(contained_in(smaller, bigger))
def test_contained_in_6(self):
    start = SourcePosition('a.py', line=3, column=5)
    end = SourcePosition('a.py', line=5, column=7)
    smaller = SourceRange(start, end)

    start = SourcePosition('a.py', line=3, column=5)
    end = SourcePosition('a.py', line=5, column=6)
    bigger = SourceRange(start, end)
    self.assertFalse(contained_in(smaller, bigger))

    start = SourcePosition('a.py', line=2, column=8)
    end = SourcePosition('a.py', line=5, column=1)
    bigger = SourceRange(start, end)
    self.assertFalse(contained_in(smaller, bigger))

    start = SourcePosition('a.py', line=2, column=None)
    end = SourcePosition('a.py', line=5, column=1)
    bigger = SourceRange(start, end)
    self.assertFalse(contained_in(smaller, bigger))
def test_source_lines(self):
    self.section.append(Setting(key='format', value='{source_lines}'))
    affected_code = (SourceRange.from_values('file', 2, end_line=2),)
    with retrieve_stdout() as stdout:
        print_results_formatted(self.logger,
                                self.section,
                                [Result('SpaceConsistencyBear',
                                        message='msg',
                                        affected_code=affected_code)],
                                {abspath('file'): ('def fun():\n',
                                                   ' pass \n')})
        self.assertEqual(stdout.getvalue(), "(' pass \\n',)\n")
def test_print_results_multiple_ranges(self):
    affected_code = (SourceRange.from_values('some_file', 5, end_line=7),
                     SourceRange.from_values('another_file', 1, 3, 1, 5),
                     SourceRange.from_values('another_file', 3, 3, 3, 5))
    with retrieve_stdout() as stdout:
        print_results(
            self.log_printer,
            Section(''),
            [Result('ClangCloneDetectionBear', 'Clone Found',
                    affected_code)],
            {abspath('some_file'): ['line ' + str(i + 1) + '\n'
                                    for i in range(10)],
             abspath('another_file'): ['line ' + str(i + 1)
                                       for i in range(10)]},
            {},
            self.console_printer)
        self.assertEqual(
            """
another_file
[ ]1 li{0}{1}
another_file
[ ]3 li{0}{2}
some_file
[ ]5 li{0}{3}
[ ]6 li{0}{4}
[ ]7 li{0}{5}
**** ClangCloneDetectionBear [Section: ] ****
! ! [Severity: NORMAL]
! ! {6}\n""".format(
                highlight_text(self.no_color, 'ne', self.lexer,
                               BackgroundSourceRangeStyle),
                highlight_text(self.no_color, ' 1', self.lexer),
                highlight_text(self.no_color, ' 3', self.lexer),
                highlight_text(self.no_color, ' 5', self.lexer),
                highlight_text(self.no_color, ' 6', self.lexer),
                highlight_text(self.no_color, ' 7', self.lexer),
                highlight_text(self.no_color, 'Clone Found',
                               style=BackgroundMessageStyle),
                ' '),
            stdout.getvalue())
def run(self, filename, file,
        db_path: path = '',
        cve_ignore: typed_list(str) = []):
    """
    Checks for vulnerable package versions in requirements files.

    :param db_path:    Path to a local vulnerability database.
    :param cve_ignore: A list of CVE numbers to be ignored.
    """
    db_path = self.db_path if not db_path else db_path

    packages = list(
        Package(key=req.key, version=req.specs[0][1])
        for req in self.try_parse_requirements(file)
        if len(req.specs) == 1 and req.specs[0][0] == '==')

    if not packages:
        return

    for vulnerability in safety.check(packages, key=None,
                                      db_mirror=db_path, cached=False,
                                      ignore_ids=cve_ignore, proxy=None):
        if 'cve' in vulnerability.vuln_id.strip().lower():
            message_template = (
                '{vuln.name}{vuln.spec} is vulnerable to {vuln.vuln_id} '
                'and your project is using {vuln.version}.')
        else:
            message_template = (
                '{vuln.name}{vuln.spec} is vulnerable to '
                'pyup.io-{vuln.vuln_id} and your project is using '
                '{vuln.version}.')

        # StopIteration should not ever happen so skipping its branch
        line_number, line = next(  # pragma: no branch
            (index, line) for index, line in enumerate(file, start=1)
            if vulnerability.name in line)
        version_spec_match = re.search(r'[=<>]+(\S+?)(?:$|\s|#)', line)
        source_range = SourceRange.from_values(
            filename,
            line_number,
            version_spec_match.start(1) + 1,
            line_number,
            version_spec_match.end(1) + 1,
        )

        yield Result(
            self,
            message_template.format(vuln=vulnerability),
            additional_info=vulnerability.advisory,
            affected_code=(source_range,),
        )
def test_result_range(self): test_file = ["123456789", "123456789", "123456789", "123456789"] self.assertEqual( remove_range(test_file, SourceRange.from_values("file", 1, 1, 1, 1)), ["23456789", "123456789", "123456789", "123456789"]) self.assertEqual( remove_range(test_file, SourceRange.from_values("file", 1, 9, 1, 9)), ["12345678", "123456789", "123456789", "123456789"]) self.assertEqual( remove_range(test_file, SourceRange.from_values("file", 1, 3, 1, 7)), ["1289", "123456789", "123456789", "123456789"]) self.assertEqual( remove_range(test_file, SourceRange.from_values("file", 1, 3, 2, 7)), ["12", "89", "123456789", "123456789"]) self.assertEqual( remove_range(test_file, SourceRange.from_values("file", 1, 3, 3, 7)), ["12", "89", "123456789"]) self.assertEqual( remove_range(test_file, SourceRange.from_values("file", 1, 3, 4, 7)), ["12", "89"]) self.assertEqual( remove_range( test_file, SourceRange.from_values("file", None, None, None, None)), []) self.assertEqual( remove_range(test_file, SourceRange.from_values("file", None, None, 3, None)), ["123456789"]) self.assertEqual( remove_range(test_file, SourceRange.from_values("file", 3, None, 3, None)), ["123456789", "123456789", "123456789"])
def test_result_range(self):
    test_file = ['123456789', '123456789', '123456789', '123456789']

    self.assertEqual(
        remove_range(test_file,
                     SourceRange.from_values('file', 1, 1, 1, 1)),
        ['23456789', '123456789', '123456789', '123456789'])

    self.assertEqual(
        remove_range(test_file,
                     SourceRange.from_values('file', 1, 9, 1, 9)),
        ['12345678', '123456789', '123456789', '123456789'])

    self.assertEqual(
        remove_range(test_file,
                     SourceRange.from_values('file', 1, 3, 1, 7)),
        ['1289', '123456789', '123456789', '123456789'])

    self.assertEqual(
        remove_range(test_file,
                     SourceRange.from_values('file', 1, 3, 2, 7)),
        ['12', '89', '123456789', '123456789'])

    self.assertEqual(
        remove_range(test_file,
                     SourceRange.from_values('file', 1, 3, 3, 7)),
        ['12', '89', '123456789'])

    self.assertEqual(
        remove_range(test_file,
                     SourceRange.from_values('file', 1, 3, 4, 7)),
        ['12', '89'])

    self.assertEqual(
        remove_range(test_file,
                     SourceRange.from_values('file', None, None, None, None)),
        [])

    self.assertEqual(
        remove_range(test_file,
                     SourceRange.from_values('file', None, None, 3, None)),
        ['123456789'])

    self.assertEqual(
        remove_range(test_file,
                     SourceRange.from_values('file', 3, None, 3, None)),
        ['123456789', '123456789', '123456789'])
def test_nostdin_nostderr_noconfig_correction(self):
    create_arguments_mock = Mock()

    class Handler:

        @staticmethod
        def create_arguments(filename, file, config_file):
            create_arguments_mock(filename, file, config_file)
            return self.test_program_path, "--correct", filename

    uut = (linter(sys.executable,
                  output_format="corrected",
                  diff_severity=RESULT_SEVERITY.INFO,
                  result_message="Custom message")
           (Handler)
           (self.section, None))

    results = list(uut.run(self.testfile_path, self.testfile_content))

    expected_correction = [s + "\n"
                           for s in ["+", "-", "*", "++", "-", "-", "+"]]

    diffs = list(Diff.from_string_arrays(
        self.testfile_content,
        expected_correction).split_diff())

    expected = [Result(uut, "Custom message",
                       affected_code=(
                           SourceRange.from_values(self.testfile_path, 4),
                           SourceRange.from_values(self.testfile_path, 6)),
                       severity=RESULT_SEVERITY.INFO,
                       diffs={self.testfile_path: diffs[0]}),
                Result.from_values(uut,
                                   "Custom message",
                                   self.testfile_path,
                                   10, None, 10, None,
                                   RESULT_SEVERITY.INFO,
                                   diffs={self.testfile_path: diffs[1]})]

    self.assertEqual(results, expected)

    create_arguments_mock.assert_called_once_with(
        self.testfile_path, self.testfile_content, None)
def test_optional_settings(self):
    uut = (external_bear_wrap(sys.executable, settings={
               'set_normal_severity': ('', bool),
               'set_sample_dbg_msg': ('', bool, False),
               'not_set_different_msg': ('', bool, True)})
           (self.TestBear)
           (self.section, None))

    results = list(uut.run(self.testfile_path,
                           self.testfile_content,
                           set_normal_severity=False))

    expected = [
        Result(origin=uut,
               message='This is wrong',
               affected_code=(SourceRange.from_values(
                   self.testfile_path, 1),),
               severity=RESULT_SEVERITY.MAJOR),
        Result(origin=uut,
               message='This is wrong too',
               affected_code=(SourceRange.from_values(
                   self.testfile_path, 3),),
               severity=RESULT_SEVERITY.INFO)
    ]

    self.assertEqual(results, expected)

    results = list(uut.run(self.testfile_path,
                           self.testfile_content,
                           set_normal_severity=True))

    expected = [
        Result(origin=uut,
               message='This is wrong',
               affected_code=(SourceRange.from_values(
                   self.testfile_path, 1),),
               severity=RESULT_SEVERITY.NORMAL),
        Result(origin=uut,
               message='This is wrong too',
               affected_code=(SourceRange.from_values(
                   self.testfile_path, 3),),
               severity=RESULT_SEVERITY.NORMAL)
    ]

    self.assertEqual(results, expected)
def setUp(self):
    self.section = Section('')
    self.uut = QuotesBear(self.section, Queue())

    self.double_quote_file = dedent("""
        '''
        Multiline string
        '''
        "a string with double quotes!"
        'A single quoted string with " in it'
        """).splitlines(True)

    self.no_quote_file = dedent("""
        '''
        Multiline string
        '''
        "a string with double quotes!"
        "A double quoted string"
        """).splitlines(True)

    self.single_quote_file = dedent("""
        '''
        Multiline string
        '''
        'a string with single quotes!'
        "A double quoted string with ' in it"
        """).splitlines(True)

    self.filename = 'f'

    self.dep_results = {
        'AnnotationBear': [
            HiddenResult(
                'AnnotationBear',
                {'comments': (),
                 'strings': (SourceRange.from_values(self.filename,
                                                     2, 1, 4, 3),
                             SourceRange.from_values(self.filename,
                                                     5, 1, 5, 30),
                             SourceRange.from_values(self.filename,
                                                     6, 1, 6, 37))})
        ]
    }
def test_print_affected_files(self):
    with retrieve_stdout() as stdout, \
            make_temp() as some_file:
        file_dict = {some_file: ["1\n", "2\n", "3\n"]}
        affected_code = (SourceRange.from_values(some_file),)
        print_affected_files(self.console_printer,
                             self.log_printer,
                             Result("origin",
                                    "message",
                                    affected_code=affected_code),
                             file_dict)
        self.assertEqual(stdout.getvalue(),
                         "\n" + relpath(some_file) + "\n")
def test_single_line_string(self):
    text = ["'from start till the end with #comments'\n", ]
    compare = (SourceRange.from_absolute_position(
                   "F",
                   AbsolutePosition(text, 0),
                   AbsolutePosition(text, len(text[0]) - 2)),)
    with execute_bear(self.python_uut, "F", text) as result:
        self.assertEqual(result[0].contents['strings'], compare)
        self.assertEqual(result[0].contents['comments'], ())
def sourcerange_from_clang_range(clang_range):
    """
    Creates a ``SourceRange`` from a clang ``SourceRange`` object.

    :param clang_range: A ``cindex.SourceRange`` object.
    """
    return SourceRange.from_values(clang_range.start.file.name,
                                   clang_range.start.line,
                                   clang_range.start.column,
                                   clang_range.end.line,
                                   clang_range.end.column)
def test_keyword_not_in_comment(self): text = ['# comment 123\n', 'a = "TODO"\n'] comments = [SourceRange.from_values('F', 1, 1, 1, 40)] dep_results = { 'AnnotationBear': [self.annotation_bear_result_type({'comments': comments})] } with execute_bear(self.uut, 'F', text, dependency_results=dep_results) as result: self.assertEqual(len(result[0].diffs), 0)
def remove_result_ranges_diffs(result_list, file_dict):
    """
    Calculates the diffs to all files in file_dict that describe the
    removal of each respective result's affected code.

    :param result_list: list of results
    :param file_dict:   dict of file contents
    :return:            returnvalue[result][file] is a diff of the changes
                        the removal of this result's affected code would
                        cause for the file.
    """
    result_diff_dict_dict = {}
    for original_result in result_list:
        mod_file_dict = copy.deepcopy(file_dict)

        # gather all source ranges from this result
        source_ranges = []

        # SourceRanges must be sorted backwards and overlaps must be
        # eliminated. This way, the deletion based on SourceRanges is not
        # offset by previous deletions in the same line that invalidate
        # the indices.
        previous = None

        for source_range in sorted(original_result.affected_code,
                                   reverse=True):
            # previous exists and overlaps
            if previous is not None and source_range.overlaps(previous):
                combined_sr = SourceRange.join(previous, source_range)
                previous = combined_sr
            elif previous is None:
                previous = source_range
            # previous exists but it doesn't overlap
            else:
                source_ranges.append(previous)
                previous = source_range
        # don't forget the last entry if there were any:
        if previous:
            source_ranges.append(previous)

        for source_range in source_ranges:
            file_name = source_range.file
            new_file = remove_range(mod_file_dict[file_name],
                                    source_range)
            mod_file_dict[file_name] = new_file

        diff_dict = {}
        for file_name in file_dict:
            diff_dict[file_name] = Diff.from_string_arrays(
                file_dict[file_name],
                mod_file_dict[file_name])

        result_diff_dict_dict[original_result] = diff_dict

    return result_diff_dict_dict
def run(self, filename, file,
        clang_cli_options: typed_list(str) = None):
    """
    Check code for syntactical or semantical problems using Clang.

    This bear supports automatic fixes.

    :param clang_cli_options: Any options that will be passed through to
                              Clang.
    """
    index = Index.create()
    diagnostics = index.parse(
        filename,
        args=clang_cli_options,
        unsaved_files=[(filename, ''.join(file))]).diagnostics
    for diag in diagnostics:
        severity = {0: RESULT_SEVERITY.INFO,
                    1: RESULT_SEVERITY.INFO,
                    2: RESULT_SEVERITY.NORMAL,
                    3: RESULT_SEVERITY.MAJOR,
                    4: RESULT_SEVERITY.MAJOR}.get(diag.severity)
        affected_code = tuple(
            sourcerange_from_clang_range(clang_range)
            for clang_range in diag.ranges)

        diffs = None
        fixits = list(diag.fixits)
        if len(fixits) > 0:
            # FIXME: coala doesn't support choice of diffs, for now
            # append first one only, often there's only one anyway
            diffs = {filename: diff_from_clang_fixit(fixits[0], file)}

            # No affected code yet? Let's derive it from the fix!
            if len(affected_code) == 0:
                affected_code = diffs[filename].affected_code(filename)

        # Still no affected code? Position is the best we can get...
        if len(affected_code) == 0 and diag.location.file is not None:
            affected_code = (SourceRange.from_values(
                diag.location.file.name,
                diag.location.line,
                diag.location.column),)

        yield Result(self,
                     diag.spelling,
                     severity=severity,
                     affected_code=affected_code,
                     diffs=diffs)
def from_values(cls, origin, message: str,
                file: str,
                line: (int, None)=None,
                column: (int, None)=None,
                end_line: (int, None)=None,
                end_column: (int, None)=None,
                severity: int=RESULT_SEVERITY.NORMAL,
                additional_info: str="",
                debug_msg="",
                diffs: (dict, None)=None,
                confidence: int=100):
    """
    Creates a result with only one SourceRange with the given start and end
    locations.

    :param origin:          Class name or class of the creator of this
                            object.
    :param message:         A message to explain the result.
    :param file:            The related file.
    :param line:            The first related line in the file.
                            (First line is 1)
    :param column:          The column indicating the first character.
                            (First character is 1)
    :param end_line:        The last related line in the file.
    :param end_column:      The column indicating the last character.
    :param severity:        A RESULT_SEVERITY object.
    :param debug_msg:       Another object useful for debugging purposes.
    :param additional_info: A long description holding additional
                            information about the issue and/or how to fix
                            it. You can use this like a manual entry for a
                            category of issues.
    :param diffs:           A dictionary with filenames as key and a
                            sequence of ``Diff`` objects associated with
                            them as values.
    :param confidence:      A number between 0 and 100 describing the
                            likelihood of this result being a real issue.
    """
    range = SourceRange.from_values(file, line, column,
                                    end_line, end_column)

    return cls(origin=origin,
               message=message,
               affected_code=(range,),
               severity=severity,
               additional_info=additional_info,
               debug_msg=debug_msg,
               diffs=diffs,
               confidence=confidence)
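# Illustrative only (not from the original source): ``from_values`` wraps
# its location arguments into a single-element ``affected_code`` tuple.
# The import paths and the bear name 'SomeBear' are assumptions made for
# the sketch, following coala's usual package layout.
from coalib.results.Result import Result
from coalib.results.SourceRange import SourceRange

result = Result.from_values('SomeBear', 'Something looks off',
                            file='main.py', line=3, column=5,
                            end_line=3, end_column=12)

print(result.affected_code ==
      (SourceRange.from_values('main.py', 3, 5, 3, 12),))  # expected: True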
def test_print_results_multiple_ranges(self): affected_code = (SourceRange.from_values("some_file", 5, end_line=7), SourceRange.from_values("another_file", 1, 3, 1, 5), SourceRange.from_values("another_file", 3, 3, 3, 5)) with retrieve_stdout() as stdout: print_results(self.log_printer, Section(""), [ Result("ClangCloneDetectionBear", "Clone Found", affected_code) ], { abspath("some_file"): ["line " + str(i + 1) + "\n" for i in range(10)], abspath("another_file"): ["line " + str(i + 1) for i in range(10)] }, {}, color=False) self.assertEqual( """ another_file | 1| li{0}{1} another_file | 3| li{0}{2} some_file | 5| {3} | 6| {4} | 7| {5} | | [NORMAL] ClangCloneDetectionBear: | | {6}\n""".format( highlight_text('ne', self.lexer, BackgroundSourceRangeStyle), highlight_text(' 1', self.lexer), highlight_text(' 3', self.lexer), highlight_text('line 5', self.lexer), highlight_text('line 6', self.lexer), highlight_text('line 7', self.lexer), highlight_text("Clone Found", style=BackgroundMessageStyle)), stdout.getvalue())
def get_singleline_strings(file, filename, text,
                           string_start, string_end, position):
    """
    Gets the SourceRange of a single-line string and its end position.

    :param file:         A tuple of strings, with each string being a line
                         in the file.
    :param filename:     The name of the file.
    :param string_start: The string which specifies how a string starts.
    :param string_end:   The string which specifies how a string ends.
    :param position:     An integer identifying the position where the
                         string started.
    :return:             A SourceRange object identifying the range of the
                         single-line string and the end_position of the
                         string as an integer.
    """
    end_position = get_end_position(string_end,
                                    text,
                                    position + len(string_start) - 1)
    newline = get_end_position('\n', text, position)
    if newline == -1:
        newline = len(text)
    if end_position == -1:
        _range = SourceRange.from_absolute_position(
            filename, AbsolutePosition(file, position))
        raise NoCloseError(string_start, _range)
    if newline > end_position:
        return (SourceRange.from_absolute_position(
                    filename,
                    AbsolutePosition(file, position),
                    AbsolutePosition(file, end_position)),
                end_position)
def test_from_clang_range(self):
    # Simulating a clang SourceRange is easier than setting one up without
    # actually parsing a complete C file.
    ClangRange = namedtuple('ClangRange', 'start end')
    ClangPosition = namedtuple('ClangPosition', 'file line column')
    ClangFile = namedtuple('ClangFile', 'name')
    file = ClangFile('t.c')
    start = ClangPosition(file, 1, 2)
    end = ClangPosition(file, 3, 4)

    uut = sourcerange_from_clang_range(ClangRange(start, end))
    compare = SourceRange.from_values('t.c', 1, 2, 3, 4)
    self.assertEqual(uut, compare)
def test_multiline_comment(self):
    text = ['some string /*within \n', "'multiline comment'*/"]
    compare = (SourceRange.from_absolute_position(
                   'F',
                   AbsolutePosition(text, text[0].find('/*')),
                   AbsolutePosition(text, len(''.join(text)) - 1)),)
    with execute_bear(self.c_uut, 'F', text) as result:
        self.assertEqual(result[0].contents['strings'], ())
        self.assertEqual(result[0].contents['comments'], compare)

    text = ['/*Multiline which does not end']
    with execute_bear(self.c_uut, 'F', text) as result:
        self.assertEqual(result[0].message, '/* has no closure')
def get_multiline(file, filename, text,
                  annotation_start, annotation_end, position):
    """
    Gets the sourcerange and end position of an annotation that can span
    multiple lines.

    :param file:             A tuple of strings, with each string being a
                             line in the file.
    :param filename:         The name of the file.
    :param annotation_start: The string specifying the start of the
                             annotation.
    :param annotation_end:   The string specifying the end of the
                             annotation.
    :param position:         An integer identifying the position where the
                             annotation started.
    :return:                 A SourceRange object holding the range of the
                             multi-line annotation and the end_position of
                             the annotation as an integer.
    """
    end_end = get_end_position(annotation_end,
                               text,
                               position + len(annotation_start) - 1)
    if end_end == -1:
        _range = SourceRange.from_absolute_position(
            filename, AbsolutePosition(file, position))
        raise NoCloseError(annotation_start, _range)
    return (SourceRange.from_absolute_position(
                filename,
                AbsolutePosition(file, position),
                AbsolutePosition(file, end_end)),
            end_end)
def range(self, filename):
    """
    Calculates a SourceRange spanning over the whole Diff. If something is
    added after the 0th line (i.e. before the first line) the first line
    will be included in the SourceRange.

    :param filename: The filename to associate the SourceRange with.
    :return:         A SourceRange object.
    """
    start = min(self._changes.keys())
    end = max(self._changes.keys())
    return SourceRange.from_values(filename,
                                   start_line=max(1, start),
                                   end_line=max(1, end))
def get_unspecified_block_range(self,
                                file,
                                filename,
                                indent_specifier,
                                annotation_dict,
                                encapsulators,
                                comments):
    """
    Gets the range of all blocks which do not have an un-indent specifier.

    :param file:             File that needs to be checked in the form of
                             a list of strings.
    :param filename:         Name of the file that needs to be checked.
    :param indent_specifier: A character or string indicating that the
                             indentation should begin.
    :param annotation_dict:  A dictionary containing sourceranges of all
                             the strings and comments within a file.
    :param encapsulators:    A tuple of sourceranges of all encapsulators
                             of a language.
    :param comments:         A dict containing all the types of comment
                             specifiers in a language.
    :return:                 A tuple of SourceRanges of blocks without
                             un-indent specifiers.
    """
    specifiers = list(self.get_valid_sequences(
        file, indent_specifier, annotation_dict, encapsulators,
        check_ending=True))

    _range = []
    for specifier in specifiers:
        current_line = specifier.line
        indent = get_indent_of_specifier(file, current_line, encapsulators)
        unindent_line = get_first_unindent(indent,
                                           file,
                                           current_line,
                                           annotation_dict,
                                           encapsulators,
                                           comments)

        if unindent_line == specifier.line:
            raise ExpectedIndentError(specifier.line)

        _range.append(SourceRange.from_values(
            filename,
            start_line=specifier.line, start_column=None,
            end_line=unindent_line, end_column=None))

    return tuple(_range)
def test_contains(self):
    a = SourceRange.from_values('test_file', 1, 2, 1, 20)
    b = SourceRange.from_values('test_file', 1, 2, 1, 20)
    self.assertIn(a, b)

    a = SourceRange.from_values('test_file', 1, 2, 2, 20)
    b = SourceRange.from_values('test_file', 1, 1, 2, 20)
    self.assertIn(a, b)

    a = SourceRange.from_values('test_file', 1, 2, 1, 20)
    b = SourceRange.from_values('test_file2', 1, 2, 1, 20)
    self.assertNotIn(a, b)

    a = SourceRange.from_values('test_file', 2, 2, 64, 20)
    b = SourceRange.from_values('test_file', 1, 1, 50, 20)
    self.assertNotIn(a, b)
def test_invalid_output(self):
    out = list(self.uut.process_output(
        ["1.0|0: Info message\n",
         "2.2|1: Normal message\n",
         "3.4|2: Major message\n"],
        "a/file.py",
        ['original_file_lines_placeholder']))
    self.assertEqual(len(out), 3)

    self.assertEqual(out[0].origin, "Lint")
    self.assertEqual(out[0].affected_code[0],
                     SourceRange.from_values("a/file.py", 1, 0))
    self.assertEqual(out[0].severity, RESULT_SEVERITY.INFO)
    self.assertEqual(out[0].message, "Info message")

    self.assertEqual(out[1].affected_code[0],
                     SourceRange.from_values("a/file.py", 2, 2))
    self.assertEqual(out[1].severity, RESULT_SEVERITY.NORMAL)
    self.assertEqual(out[1].message, "Normal message")

    self.assertEqual(out[2].affected_code[0],
                     SourceRange.from_values("a/file.py", 3, 4))
    self.assertEqual(out[2].severity, RESULT_SEVERITY.MAJOR)
    self.assertEqual(out[2].message, "Major message")
def run(self, filename, file,
        language: str = 'auto',
        languagetool_disable_rules: typed_list(str) = ()):
    '''
    Checks the code with LanguageTool.

    :param language:                   A locale representing the language
                                       you want to have checked. If set to
                                       'auto' the language is guessed. If
                                       the language cannot be guessed,
                                       'en-US' is used.
    :param languagetool_disable_rules: List of rules to disable checks for.
    '''
    # Defer import so the check_prerequisites can be run without
    # language_check being there.
    from language_check import LanguageTool, correct

    joined_text = "".join(file)
    language = (guess_language(joined_text)
                if language == 'auto' else language)
    language = 'en-US' if not language else language

    tool = LanguageTool(language, motherTongue="en_US")
    tool.disabled.update(languagetool_disable_rules)

    matches = tool.check(joined_text)
    for match in matches:
        if not match.replacements:
            diffs = None
        else:
            replaced = correct(joined_text, [match]).splitlines(True)
            diffs = {filename: Diff.from_string_arrays(file, replaced)}

        rule_id = match.ruleId
        if match.subId is not None:
            rule_id += '[{}]'.format(match.subId)

        message = match.msg + ' (' + rule_id + ')'
        source_range = SourceRange.from_values(filename,
                                               match.fromy + 1,
                                               match.fromx + 1,
                                               match.toy + 1,
                                               match.tox + 1)
        yield Result(self, message, diffs=diffs,
                     affected_code=(source_range,))