def test_ignore_glob(self):
    """Results whose origin matches an ignore glob must be ignored."""
    result = Result.from_values('LineLengthBear', 'message', file='d',
                                line=1, column=1, end_line=2, end_column=2)
    ranges = [(['(line*|space*)', 'py*'],
               SourceRange.from_values('d', 1, 1, 2, 2))]
    self.assertTrue(check_result_ignore(result, ranges))

    result = Result.from_values('SpaceConsistencyBear', 'message', file='d',
                                line=1, column=1, end_line=2, end_column=2)
    ranges = [(['(line*|space*)', 'py*'],
               SourceRange.from_values('d', 1, 1, 2, 2))]
    self.assertTrue(check_result_ignore(result, ranges))

    # XMLBear matches neither 'line*', 'space*' nor 'py*', so it is kept.
    result = Result.from_values('XMLBear', 'message', file='d',
                                line=1, column=1, end_line=2, end_column=2)
    ranges = [(['(line*|space*)', 'py*'],
               SourceRange.from_values('d', 1, 1, 2, 2))]
    self.assertFalse(check_result_ignore(result, ranges))
def test_is_applicable(self):
    """IgnoreResultAction.is_applicable rejects non-Result and fileless input."""
    # A non-Result argument raises TypeError.
    with self.assertRaises(TypeError) as context:
        IgnoreResultAction.is_applicable('str', {}, {})

    self.assertEqual(
        IgnoreResultAction.is_applicable(
            Result.from_values('origin', 'msg', "file doesn't exist", 2),
            {}, {}),
        "The result is associated with source code that doesn't "
        'seem to exist.')
    self.assertEqual(
        IgnoreResultAction.is_applicable(Result('', ''), {}, {}),
        'The result is not associated with any source code.')

    # A result pointing at an existing file is applicable.
    with make_temp() as f_a:
        self.assertTrue(IgnoreResultAction.is_applicable(
            Result.from_values('origin', 'msg', f_a, 2), {}, {}))
def test_cyclomatic_complexity(self):
    """Severity depends on how the threshold compares to the complexity."""
    # INFO result when the configured threshold is above the complexity.
    self.check_results(
        self.uut, test_file3.splitlines(True),
        [Result.from_values('RadonBear',
                            'f has a cyclomatic complexity of 51',
                            severity=RESULT_SEVERITY.INFO,
                            file='test_file3', line=1, end_line=1)],
        filename='test_file3',
        settings={'cyclomatic_complexity': 52})

    # MAJOR result when the complexity exceeds the threshold.
    self.check_results(
        self.uut, test_file3.splitlines(True),
        [Result.from_values('RadonBear',
                            'f has a cyclomatic complexity of 51',
                            severity=RESULT_SEVERITY.MAJOR,
                            file='test_file3', line=1, end_line=1)],
        filename='test_file3',
        settings={'cyclomatic_complexity': 10})
def test_ignore_glob(self):
    """Results whose origin matches an ignore glob must be ignored."""
    result = Result.from_values("LineLengthBear", "message", file="d",
                                line=1, column=1, end_line=2, end_column=2)
    ranges = [(["(line*|space*)", "py*"],
               SourceRange.from_values("d", 1, 1, 2, 2))]
    self.assertTrue(check_result_ignore(result, ranges))

    result = Result.from_values("SpaceConsistencyBear", "message", file="d",
                                line=1, column=1, end_line=2, end_column=2)
    ranges = [(["(line*|space*)", "py*"],
               SourceRange.from_values("d", 1, 1, 2, 2))]
    self.assertTrue(check_result_ignore(result, ranges))

    # XMLBear matches no glob, so its result is not ignored.
    result = Result.from_values("XMLBear", "message", file="d",
                                line=1, column=1, end_line=2, end_column=2)
    ranges = [(["(line*|space*)", "py*"],
               SourceRange.from_values("d", 1, 1, 2, 2))]
    self.assertFalse(check_result_ignore(result, ranges))
def test_ignore_results(self):
    """check_result_ignore honors file, range overlap and origin lists."""
    ranges = [([], SourceRange.from_values("f", 1, 1, 2, 2))]
    result = Result.from_values("origin", "message", file="e",
                                line=1, column=1, end_line=2, end_column=2)
    # Different file -> not ignored.
    self.assertFalse(check_result_ignore(result, ranges))

    # Non-overlapping range in the same file -> still not ignored.
    ranges.append(([], SourceRange.from_values("e", 2, 3, 3, 3)))
    self.assertFalse(check_result_ignore(result, ranges))

    # Overlapping range -> ignored.
    ranges.append(([], SourceRange.from_values("e", 1, 1, 2, 2)))
    self.assertTrue(check_result_ignore(result, ranges))

    result1 = Result.from_values("origin", "message", file="e")
    self.assertFalse(check_result_ignore(result1, ranges))

    # Origin list without a matching entry -> not ignored.
    ranges = [(['something', 'else', 'not origin'],
               SourceRange.from_values("e", 1, 1, 2, 2))]
    self.assertFalse(check_result_ignore(result, ranges))

    # Origin list containing the result's origin -> ignored.
    ranges = [(['something', 'else', 'origin'],
               SourceRange.from_values("e", 1, 1, 2, 2))]
    self.assertTrue(check_result_ignore(result, ranges))
def test_overlaps(self):
    """Result.overlaps accepts a single SourceRange or an iterable."""
    overlapping_range = SourceRange.from_values('file1', 1, 1, 2, 2)
    nonoverlapping_range = SourceRange.from_values('file2', 1, 1, 2, 2)
    uut = Result.from_values('origin', 'message', file='file1',
                             line=1, column=1, end_line=2, end_column=2)

    self.assertTrue(uut.overlaps(overlapping_range))
    self.assertTrue(uut.overlaps([overlapping_range]))
    self.assertFalse(uut.overlaps(nonoverlapping_range))

    # Column-less ranges (whole lines) must overlap as well.
    overlapping_range = SourceRange.from_values('file1', 1, None, 1, None)
    nonoverlapping_range = SourceRange.from_values(
        'file2', 1, None, 1, None)
    uut = Result.from_values('origin', 'message', file='file1',
                             line=1, column=1, end_line=1, end_column=20)

    self.assertTrue(uut.overlaps(overlapping_range))
    self.assertTrue(uut.overlaps([overlapping_range]))
    self.assertFalse(uut.overlaps(nonoverlapping_range))
def test_print_results_sorting(self):
    """Results are printed sorted by line number regardless of input order."""
    with retrieve_stdout() as stdout:
        print_results(self.log_printer,
                      Section(""),
                      [Result.from_values("SpaceConsistencyBear",
                                          "Trailing whitespace found",
                                          file="file",
                                          line=5),
                       Result.from_values("SpaceConsistencyBear",
                                          "Trailing whitespace found",
                                          file="file",
                                          line=2)],
                      {abspath("file"): ["test line\n",
                                         "line 2\n",
                                         "line 3\n",
                                         "line 4\n",
                                         "line 5\n"]},
                      {},
                      color=False)

        # NOTE(review): line breaks of the expected string were
        # reconstructed from a whitespace-mangled source -- confirm the
        # exact column padding against the actual print_results output.
        self.assertEqual("""
file
| 2| line 2
| | [NORMAL] SpaceConsistencyBear:
| | Trailing whitespace found

file
| 5| line 5
| | [NORMAL] SpaceConsistencyBear:
| | Trailing whitespace found
""",
                         stdout.getvalue())
def test_apply(self):
    """OpenEditorAction keeps previously applied diffs intact."""
    # Initial file contents, *before* a patch was applied
    file_dict = {self.fa: ["1\n", "2\n", "3\n"],
                 self.fb: ["1\n", "2\n", "3\n"],
                 "f_c": ["1\n", "2\n", "3\n"]}

    # A patch that was applied for some reason to make things complicated
    diff_dict = {self.fb: Diff(file_dict[self.fb])}
    diff_dict[self.fb].change_line(3, "3\n", "3_changed\n")

    # File contents after the patch was applied, that's what's in the files
    current_file_dict = {filename: diff_dict[filename].modified
                         if filename in diff_dict
                         else file_dict[filename]
                         for filename in (self.fa, self.fb)}
    for filename in current_file_dict:
        with open(filename, "w") as handle:
            handle.writelines(current_file_dict[filename])

    # End file contents after the patch and the OpenEditorAction was
    # applied
    expected_file_dict = {self.fa: ["1\n", "3\n"],
                          self.fb: ["1\n", "3_changed\n"],
                          "f_c": ["1\n", "2\n", "3\n"]}

    section = Section("")
    section.append(Setting("editor", ""))
    uut = OpenEditorAction()
    # Replace the real editor invocation with the fake edit helper.
    subprocess.call = self.fake_edit
    diff_dict = uut.apply_from_section(
        Result.from_values("origin", "msg", self.fa),
        file_dict, diff_dict, section)
    diff_dict = uut.apply_from_section(
        Result.from_values("origin", "msg", self.fb),
        file_dict, diff_dict, section)

    for filename in diff_dict:
        file_dict[filename] = diff_dict[filename].modified

    self.assertEqual(file_dict, expected_file_dict)
def test_add(self):
    """Adding Results merges their diffs; apply() patches the file dict."""
    file_dict = {
        'f_a': ['1', '2', '3'],
        'f_b': ['1', '2', '3'],
        'f_c': ['1', '2', '3']
    }
    expected_file_dict = {
        'f_a': ['1\n', '3_changed'],
        'f_b': ['1\n', '2\n', '3_changed'],
        'f_c': ['1', '2', '3']
    }

    diff = Diff(file_dict['f_a'])
    diff.delete_line(2)
    uut1 = Result('origin', 'msg', diffs={'f_a': diff})

    diff = Diff(file_dict['f_a'])
    diff.modify_line(3, '3_changed')
    uut2 = Result('origin', 'msg', diffs={'f_a': diff})

    diff = Diff(file_dict['f_b'])
    diff.modify_line(3, '3_changed')
    uut3 = Result('origin', 'msg', diffs={'f_b': diff})

    uut1 += uut2 + uut3
    uut1.apply(file_dict)

    self.assertEqual(file_dict, expected_file_dict)
def test_print_results_sorting(self):
    """Results are printed sorted by line with highlighted source lines."""
    with retrieve_stdout() as stdout:
        print_results(self.log_printer,
                      Section(''),
                      [Result.from_values('SpaceConsistencyBear',
                                          'Trailing whitespace found',
                                          file='file',
                                          line=5),
                       Result.from_values('SpaceConsistencyBear',
                                          'Trailing whitespace found',
                                          file='file',
                                          line=2)],
                      {abspath('file'): ['test line\n',
                                         '\t\n',
                                         'line 3\n',
                                         'line 4\n',
                                         'line 5\t\n']},
                      {},
                      self.console_printer)

        # NOTE(review): expected-string line breaks reconstructed from a
        # whitespace-mangled source -- confirm padding against real output.
        self.assertEqual("""
file
| 2| {0}
| | [NORMAL] SpaceConsistencyBear:
| | {1}

file
| 5| {2}
| | [NORMAL] SpaceConsistencyBear:
| | {1}\n""".format(highlight_text(self.no_color, '\t', self.lexer),
                    highlight_text(self.no_color,
                                   'Trailing whitespace found',
                                   style=BackgroundMessageStyle),
                    highlight_text(self.no_color, 'line 5\t', self.lexer)),
                         stdout.getvalue())
def test_bad_placeholder_space_color(self):
    """StylintBear reports placeholder, color, comment and comma issues."""
    filename = 'test_bad_placeholder_space_color.styl'
    file_contents = load_testfile(filename)
    self.check_results(
        self.uut,
        file_contents,
        [Result.from_values('StylintBear',
                            message='always use a placeholder variable '
                                    'when extending',
                            file=get_testfile_path(filename),
                            line=4,
                            severity=RESULT_SEVERITY.NORMAL),
         Result.from_values('StylintBear',
                            message='hexidecimal color should '
                                    'be a variable',
                            file=get_testfile_path(filename),
                            line=6,
                            column=8,
                            severity=RESULT_SEVERITY.NORMAL),
         Result.from_values('StylintBear',
                            message='line comments require a space '
                                    'after //',
                            file=get_testfile_path(filename),
                            line=8,
                            column=2,
                            severity=RESULT_SEVERITY.NORMAL),
         Result.from_values('StylintBear',
                            message='commas must be followed '
                                    'by a space for readability',
                            file=get_testfile_path(filename),
                            line=9,
                            column=6,
                            severity=RESULT_SEVERITY.NORMAL),
         ],
        filename=get_testfile_path(filename))
def test_add(self):
    """Adding Results merges diffs; apply() patches the file dict in place."""
    file_dict = {
        "f_a": ["1", "2", "3"],
        "f_b": ["1", "2", "3"],
        "f_c": ["1", "2", "3"]
    }
    expected_file_dict = {
        "f_a": ["1", "3_changed"],
        "f_b": ["1", "2", "3_changed"],
        "f_c": ["1", "2", "3"]
    }

    diff = Diff(file_dict['f_a'])
    diff.delete_line(2)
    uut1 = Result("origin", "msg", diffs={"f_a": diff})

    diff = Diff(file_dict['f_a'])
    diff.change_line(3, "3", "3_changed")
    uut2 = Result("origin", "msg", diffs={"f_a": diff})

    diff = Diff(file_dict['f_b'])
    diff.change_line(3, "3", "3_changed")
    uut3 = Result("origin", "msg", diffs={"f_b": diff})

    uut1 += uut2 + uut3
    uut1.apply(file_dict)

    self.assertEqual(file_dict, expected_file_dict)
def test_ignore_results(self):
    """check_result_ignore honors ranges and origin lists incl. suffixes."""
    ranges = [([], SourceRange.from_values('f', 1, 1, 2, 2))]
    result = Result.from_values('origin (Something Specific)',
                                'message',
                                file='e',
                                line=1,
                                column=1,
                                end_line=2,
                                end_column=2)
    # Different file -> not ignored.
    self.assertFalse(check_result_ignore(result, ranges))

    # Non-overlapping range -> still not ignored.
    ranges.append(([], SourceRange.from_values('e', 2, 3, 3, 3)))
    self.assertFalse(check_result_ignore(result, ranges))

    # Overlapping range -> ignored.
    ranges.append(([], SourceRange.from_values('e', 1, 1, 2, 2)))
    self.assertTrue(check_result_ignore(result, ranges))

    result1 = Result.from_values('origin', 'message', file='e')
    self.assertTrue(check_result_ignore(result1, ranges))

    # Origin list without a matching entry -> not ignored.
    ranges = [(['something', 'else', 'not origin'],
               SourceRange.from_values('e', 1, 1, 2, 2))]
    self.assertFalse(check_result_ignore(result, ranges))

    # Origin prefix 'origin' matches 'origin (Something Specific)'.
    ranges = [(['something', 'else', 'origin'],
               SourceRange.from_values('e', 1, 1, 2, 2))]
    self.assertTrue(check_result_ignore(result, ranges))
def test_print_results_missing_line(self):
    """A result on a line past EOF prints the line-doesn't-exist marker."""
    with retrieve_stdout() as stdout:
        print_results(
            self.log_printer,
            Section(''),
            [Result.from_values('t', 'msg', file='file', line=5),
             Result.from_values('t', 'msg', file='file', line=6)],
            {abspath('file'): ['line ' + str(i + 1) for i in range(5)]},
            {},
            self.console_printer)
        self.assertEqual('\n'
                         'file\n'
                         '| 5| {0}\n'
                         '| | [NORMAL] t:\n'
                         '| | {1}\n'
                         '\n'
                         'file\n'
                         '| 6| {2}\n'
                         '| | [NORMAL] t:\n'
                         '| | {1}\n'.format(
                             highlight_text(self.no_color, 'line 5',
                                            self.lexer),
                             highlight_text(self.no_color, 'msg',
                                            style=BackgroundMessageStyle),
                             STR_LINE_DOESNT_EXIST),
                         stdout.getvalue())
def test_print_results_missing_line(self):
    """A result on a line past EOF prints the line-doesn't-exist marker."""
    with retrieve_stdout() as stdout:
        print_results(
            self.log_printer,
            Section(""),
            [Result.from_values("t", "msg", file="file", line=5),
             Result.from_values("t", "msg", file="file", line=6)],
            {abspath("file"): ["line " + str(i + 1) for i in range(5)]},
            {},
            color=False)
        self.assertEqual("\n"
                         "file\n"
                         "| 5| {0}\n"
                         "| | [NORMAL] t:\n"
                         "| | {1}\n"
                         "\n"
                         "file\n"
                         "| 6| {2}\n"
                         "| | [NORMAL] t:\n"
                         "| | {1}\n".format(
                             highlight_text('line 5', self.lexer),
                             highlight_text("msg",
                                            style=BackgroundMessageStyle),
                             STR_LINE_DOESNT_EXIST),
                         stdout.getvalue())
def test_print_results_sorting(self):
    """Results are printed sorted by line with highlighted source lines."""
    with retrieve_stdout() as stdout:
        print_results(self.log_printer,
                      Section(""),
                      [Result.from_values("SpaceConsistencyBear",
                                          "Trailing whitespace found",
                                          file="file",
                                          line=5),
                       Result.from_values("SpaceConsistencyBear",
                                          "Trailing whitespace found",
                                          file="file",
                                          line=2)],
                      {abspath("file"): ["test line\n",
                                         "\t\n",
                                         "line 3\n",
                                         "line 4\n",
                                         "line 5\t\n"]},
                      {},
                      color=False)

        # NOTE(review): expected-string line breaks reconstructed from a
        # whitespace-mangled source -- confirm padding against real output.
        self.assertEqual("""
file
| 2| {0}
| | [NORMAL] SpaceConsistencyBear:
| | {1}

file
| 5| {2}
| | [NORMAL] SpaceConsistencyBear:
| | {1}\n""".format(highlight_text('\t', self.lexer),
                    highlight_text("Trailing whitespace found",
                                   style=BackgroundMessageStyle),
                    highlight_text('line 5\t', self.lexer)),
                         stdout.getvalue())
def process_output(self, output, filename, file, cc_threshold: int=10):
    """
    Turn the tool's JSON (or fatal-error text) output into Results.

    :param output:       Raw tool output; JSON on success, plain text on
                         fatal parse errors.
    :param filename:     Name of the analyzed file.
    :param file:         Contents of the analyzed file (unused here).
    :param cc_threshold: Threshold value for cyclomatic complexity
    """
    message = '{} has a cyclomatic complexity of {}.'
    if output:
        try:
            output = json.loads(output)
        except JSONDecodeError:
            # Non-JSON output means the tool hit a fatal error; extract
            # line and message from its plain-text report instead.
            output_regex = (r'Fatal error \[getReports\]: .+: '
                            r'Line (?P<line>\d+): (?P<message>.*)')
            for match in re.finditer(output_regex, output):
                groups = match.groupdict()
                yield Result.from_values(
                    origin=self,
                    message=groups['message'].strip(),
                    file=filename,
                    severity=RESULT_SEVERITY.MAJOR,
                    line=int(groups['line']))
            return

        # One result per function at or above the complexity threshold.
        for function in output['reports'][0]['functions']:
            if function['cyclomatic'] >= cc_threshold:
                yield Result.from_values(
                    origin=self,
                    message=message.format(function['name'],
                                           function['cyclomatic']),
                    file=filename,
                    line=function['line'])
def test_ignore(self):
    """Applying IgnoreResultAction twice keeps earlier ignore comments."""
    uut = IgnoreResultAction()
    with make_temp() as f_a:
        file_dict = {
            f_a: ['1\n', '2\n', '3\n']
        }
        file_diff_dict = {}

        # Apply an initial patch
        uut.apply(Result.from_values('origin', 'msg', f_a, 2),
                  file_dict, file_diff_dict, 'c')
        self.assertEqual(
            file_diff_dict[f_a].modified,
            ['1\n', '2 // Ignore origin\n', '3\n'])
        with open(f_a, 'r') as f:
            self.assertEqual(file_diff_dict[f_a].modified, f.readlines())
        self.assertTrue(exists(f_a + '.orig'))

        # Apply a second patch, old patch has to stay!
        uut.apply(Result.from_values('else', 'msg', f_a, 1),
                  file_dict, file_diff_dict, 'c')
        self.assertEqual(
            file_diff_dict[f_a].modified,
            ['1 // Ignore else\n', '2 // Ignore origin\n', '3\n'])
        with open(f_a, 'r') as f:
            self.assertEqual(file_diff_dict[f_a].modified, f.readlines())
def test_naming_violation(self):
    """PHPMessDetectorBear flags too-short variable names with 'naming'."""
    file_contents = load_testfile('naming_violation.php')
    self.section.append(Setting('phpmd_rulesets', 'naming'))
    self.check_results(
        self.uut,
        file_contents,
        [Result.from_values('PHPMessDetectorBear',
                            'Avoid variables with short names like $q. '
                            'Configured minimum length is 3.',
                            file=get_testfile_path(
                                'naming_violation.php'),
                            line=3),
         Result.from_values('PHPMessDetectorBear',
                            'Avoid variables with short names like $as. '
                            'Configured minimum length is 3.',
                            file=get_testfile_path(
                                'naming_violation.php'),
                            line=4),
         Result.from_values('PHPMessDetectorBear',
                            'Avoid variables with short names like $r. '
                            'Configured minimum length is 3.',
                            file=get_testfile_path(
                                'naming_violation.php'),
                            line=5)],
        filename=get_testfile_path('naming_violation.php'))
def test_bad_lowercase_tagname(self):
    """HTMLHintBear reports uppercase element names as MAJOR results."""
    filename = 'test_bad_lowercase_tagname.html'
    file_contents = load_testfile(filename)
    self.check_results(
        self.uut,
        file_contents,
        [Result.from_values('HTMLHintBear',
                            message='The html element name of [ SPAN ] '
                                    'must be in lowercase.',
                            file=get_testfile_path(filename),
                            line=2,
                            column=1,
                            end_line=2,
                            end_column=1,
                            severity=RESULT_SEVERITY.MAJOR),
         Result.from_values('HTMLHintBear',
                            message='The html element name of [ SPAN ] '
                                    'must be in lowercase.',
                            file=get_testfile_path(filename),
                            line=3,
                            column=1,
                            end_line=3,
                            end_column=1,
                            severity=RESULT_SEVERITY.MAJOR)],
        filename=get_testfile_path(filename))
def test_ordering(self):
    """
    Tests the ordering routines of Result.

    This tests enough to have all branches covered. Not every case may be
    covered but I want to see the (wo)man who writes comparison routines
    that match these criteria and are wrong to the specification. (Given
    he does not engineer the routine to trick the test explicitly.)
    """
    medium = Result(origin='b', message='b', file='b',
                    severity=RESULT_SEVERITY.NORMAL)
    medium_too = Result(origin='b', message='b', file='b',
                        severity=RESULT_SEVERITY.NORMAL)
    self.assert_equal(medium, medium_too)

    bigger_file = Result(origin='b', message='b', file='c',
                         severity=RESULT_SEVERITY.NORMAL)
    self.assert_ordering(bigger_file, medium)

    # Results without a file sort after results with one.
    no_file = Result(origin='b', message='b', file=None,
                     severity=RESULT_SEVERITY.NORMAL)
    self.assert_ordering(medium, no_file)

    no_file_and_unsevere = Result(origin='b', message='b', file=None,
                                  severity=RESULT_SEVERITY.INFO)
    self.assert_ordering(no_file_and_unsevere, no_file)
    self.assert_ordering(medium, no_file_and_unsevere)

    greater_origin = Result(origin='c', message='b', file='b',
                            severity=RESULT_SEVERITY.NORMAL)
    self.assert_ordering(greater_origin, medium)

    # Line numbers take precedence over origin in the ordering.
    medium.line_nr = 5
    greater_origin.line_nr = 3
    self.assert_ordering(medium, greater_origin)

    uut = Result("origin", "message", "file", line_nr=1)
    cmp = Result("origin", "message", "file", line_nr=1)
    self.assert_equal(cmp, uut)

    cmp = Result("origin", "message", "file")
    self.assertNotEqual(cmp, uut)
def test_print_results_missing_line(self):
    """A result on a line past EOF prints the line-doesn't-exist marker."""
    with retrieve_stdout() as stdout:
        print_results(
            self.log_printer,
            Section(""),
            [
                Result.from_values("t", "msg", file="file", line=5),
                Result.from_values("t", "msg", file="file", line=6),
            ],
            {abspath("file"): ["line " + str(i + 1) for i in range(5)]},
            {},
            color=False,
        )
        self.assertEqual(
            "\n"
            "file\n"
            "| 5| line 5\n"
            "| | [NORMAL] t:\n"
            "| | msg\n"
            "\n"
            "file\n"
            "| | {}\n"
            "| | [NORMAL] t:\n"
            "| | msg\n".format(STR_LINE_DOESNT_EXIST),
            stdout.getvalue(),
        )
def test_stdin_stderr_config_correction(self):
    """linter with stdin/stderr + 'corrected' output yields diff Results."""
    create_arguments_mock = Mock()
    generate_config_mock = Mock()

    # `some_value_A` and `some_value_B` are used to test the different
    # delegation to `generate_config()` and `create_arguments()`
    # accordingly.
    class Handler:

        @staticmethod
        def generate_config(filename, file, some_value_A):
            generate_config_mock(filename, file, some_value_A)
            return "\n".join(["use_stdin", "use_stderr", "correct"])

        @staticmethod
        def create_arguments(filename, file, config_file, some_value_B):
            create_arguments_mock(filename, file, config_file,
                                  some_value_B)
            return self.test_program_path, "--config", config_file

    uut = (linter(sys.executable,
                  use_stdin=True,
                  use_stdout=False,
                  use_stderr=True,
                  output_format="corrected",
                  config_suffix=".conf")
           (Handler)
           (self.section, None))

    results = list(uut.run(self.testfile2_path,
                           self.testfile2_content,
                           some_value_A=124,
                           some_value_B=-78))

    expected_correction = [s + "\n" for s in ["+", "/", "/", "-"]]

    diffs = list(Diff.from_string_arrays(
        self.testfile2_content,
        expected_correction).split_diff())

    expected = [Result.from_values(uut, "Inconsistency found.",
                                   self.testfile2_path, 1, None, 1, None,
                                   RESULT_SEVERITY.NORMAL,
                                   diffs={self.testfile2_path: diffs[0]}),
                Result.from_values(uut, "Inconsistency found.",
                                   self.testfile2_path, 5, None, 5, None,
                                   RESULT_SEVERITY.NORMAL,
                                   diffs={self.testfile2_path: diffs[1]})]

    self.assertEqual(results, expected)

    create_arguments_mock.assert_called_once_with(
        self.testfile2_path, self.testfile2_content, ANY, -78)
    # The config file handed to create_arguments carries the suffix.
    self.assertEqual(create_arguments_mock.call_args[0][2][-5:], ".conf")
    generate_config_mock.assert_called_once_with(
        self.testfile2_path, self.testfile2_content, 124)
def test_is_applicable(self):
    """is_applicable requires a Result whose file actually exists."""
    self.assertFalse(IgnoreResultAction.is_applicable('str', {}, {}))
    self.assertFalse(IgnoreResultAction.is_applicable(
        Result.from_values('origin', 'msg', "file doesn't exist", 2),
        {}, {}))
    with make_temp() as f_a:
        self.assertTrue(IgnoreResultAction.is_applicable(
            Result.from_values('origin', 'msg', f_a, 2), {}, {}))
def test_process_output_regex(self):
    """Regex output parsing maps severities and warns on unknown ones."""
    # Also test the case when an unknown severity is matched.
    test_output = ("12:4-14:0-Serious issue (error) -> ORIGIN=X\n"
                   "0:0-0:1-This is a warning (warning) -> ORIGIN=Y\n"
                   "813:77-1024:32-Just a note (info) -> ORIGIN=Z\n"
                   "0:0-0:0-Some unknown sev (???) -> ORIGIN=W\n")
    regex = (r"(?P<line>\d+):(?P<column>\d+)-"
             r"(?P<end_line>\d+):(?P<end_column>\d+)-"
             r"(?P<message>.*) \((?P<severity>.*)\) -> "
             r"ORIGIN=(?P<origin>.*)")

    uut = (linter(sys.executable,
                  output_format="regex",
                  output_regex=regex)
           (self.EmptyTestLinter)
           (self.section, None))
    uut.warn = Mock()

    sample_file = "some-file.xtx"
    results = list(uut.process_output(test_output, sample_file, [""]))
    expected = [Result.from_values("EmptyTestLinter (X)",
                                   "Serious issue",
                                   sample_file, 12, 4, 14, 0,
                                   RESULT_SEVERITY.MAJOR),
                Result.from_values("EmptyTestLinter (Y)",
                                   "This is a warning",
                                   sample_file, 0, 0, 0, 1,
                                   RESULT_SEVERITY.NORMAL),
                Result.from_values("EmptyTestLinter (Z)",
                                   "Just a note",
                                   sample_file, 813, 77, 1024, 32,
                                   RESULT_SEVERITY.INFO),
                Result.from_values("EmptyTestLinter (W)",
                                   "Some unknown sev",
                                   sample_file, 0, 0, 0, 0,
                                   RESULT_SEVERITY.NORMAL)]

    self.assertEqual(results, expected)
    uut.warn.assert_called_once_with(
        "'???' not found in severity-map. Assuming "
        "`RESULT_SEVERITY.NORMAL`.")

    # Test when providing a sequence as output.
    test_output = ["", "12:4-14:0-Serious issue (error) -> ORIGIN=X\n"]
    results = list(uut.process_output(test_output, sample_file, [""]))
    expected = [Result.from_values("EmptyTestLinter (X)",
                                   "Serious issue",
                                   sample_file, 12, 4, 14, 0,
                                   RESULT_SEVERITY.MAJOR)]
    self.assertEqual(results, expected)
def run(self, filename, file,
        dependency_results=dict(),
        follow_redirects: bool = True,
        ):
    """
    Find links in any text file and check if they are archived.

    Link is considered valid if the link has been archived by any services
    in memento_client.

    This bear can automatically fix redirects.

    Warning: This bear will make HEAD requests to all URLs mentioned in
    your codebase, which can potentially be destructive. As an example,
    this bear would naively just visit the URL from a line that goes like
    `do_not_ever_open = 'https://api.acme.inc/delete-all-data'` wiping out
    all your data.

    :param dependency_results: Results given by URLHeadBear.
    :param follow_redirects:   Set to true to check all redirect urls.
    """
    # NOTE(review): the mutable `dict()` default is shared across calls;
    # it is only read here (`.get`), and coala derives setting types from
    # defaults, so it is kept as-is.
    self._mc = MementoClient()

    for result in dependency_results.get(URLHeadBear.name, []):
        line_number, link, code, context = result.contents

        # Only check links the HEAD request succeeded for (2xx/3xx).
        if not (code and 200 <= code < 400):
            continue

        status = MementoBear.check_archive(self._mc, link)
        if not status:
            yield Result.from_values(
                self,
                ('This link is not archived yet, visit '
                 'https://web.archive.org/save/%s to get it archived.'
                 % link),
                file=filename,
                line=line_number,
                severity=RESULT_SEVERITY.INFO
            )

        if follow_redirects and 300 <= code < 400:  # HTTP status 30x
            redirect_urls = MementoBear.get_redirect_urls(link)

            for url in redirect_urls:
                status = MementoBear.check_archive(self._mc, url)
                if not status:
                    yield Result.from_values(
                        self,
                        ('This link redirects to %s and not archived yet, '
                         'visit https://web.archive.org/save/%s to get it '
                         'archived.'
                         % (url, url)),
                        file=filename,
                        line=line_number,
                        severity=RESULT_SEVERITY.INFO
                    )
def run(self, filename, file,
        timeout: int=DEFAULT_TIMEOUT,
        ignore_regex: str="[.\/]example\.com"):
    """
    Find links in any text file and check if they are valid.

    A link is considered valid if the server responds with a 2xx code.

    This bear can automatically fix redirects, but ignores redirect
    URLs that have a huge difference with the original URL.

    :param timeout:      Request timeout period.
    :param ignore_regex: A regex for urls to ignore.
    """
    for line_number, link, code in InvalidLinkBear.find_links_in_file(
            file, timeout, ignore_regex):
        if code is None:
            yield Result.from_values(
                origin=self,
                message=('Broken link - unable to connect to '
                         '{url}').format(url=link),
                file=filename,
                line=line_number,
                severity=RESULT_SEVERITY.MAJOR)
        elif not 200 <= code < 300:
            # HTTP status 404, 410 or 50x
            if code in (404, 410) or 500 <= code < 600:
                yield Result.from_values(
                    origin=self,
                    message=('Broken link - unable to connect to {url} '
                             '(HTTP Error: {code})'
                             ).format(url=link, code=code),
                    file=filename,
                    line=line_number,
                    severity=RESULT_SEVERITY.NORMAL)
            if 300 <= code < 400:  # HTTP status 30x
                redirect_url = requests.head(link,
                                             allow_redirects=True).url
                matcher = SequenceMatcher(None, redirect_url, link)
                # Fixed misplaced parenthesis: each ratio is compared to
                # 0.7 on its own. real_quick_ratio() is the cheap upper
                # bound, ratio() the exact (and expensive) similarity.
                if (matcher.real_quick_ratio() > 0.7 and
                        matcher.ratio() > 0.7):
                    diff = Diff(file)
                    current_line = file[line_number - 1]
                    start = current_line.find(link)
                    end = start + len(link)
                    replacement = (current_line[:start] + redirect_url +
                                   current_line[end:])
                    diff.change_line(line_number, current_line,
                                     replacement)

                    yield Result.from_values(
                        self,
                        'This link redirects to ' + redirect_url,
                        diffs={filename: diff},
                        file=filename,
                        line=line_number,
                        severity=RESULT_SEVERITY.NORMAL)
def __init__(self, origin, deadScopes, pyflakes_messages):
    """
    Wrap pyflakes scope information in a Result.

    :param origin:            The originating bear.
    :param deadScopes:        Scopes collected by the pyflakes checker.
    :param pyflakes_messages: Raw messages emitted by pyflakes.
    """
    Result.__init__(self, origin, message='')
    # Exactly one module scope exists per checked file.
    self.module_scope = self.get_scopes(ModuleScope, deadScopes)[0]
    self.class_scopes = self.get_scopes(ClassScope, deadScopes)
    self.function_scopes = self.get_scopes(FunctionScope, deadScopes)
    self.generator_scopes = self.get_scopes(GeneratorScope, deadScopes)
    self.doctest_scopes = self.get_scopes(DoctestScope, deadScopes)
    self.pyflakes_messages = pyflakes_messages
def test_process_output_unified_diff_incomplete_hunk(self):
    """Unified-diff output with incomplete hunks still yields Results."""
    uut = (linter(sys.executable, output_format='unified-diff')
           (self.EmptyTestLinter)
           (self.section, None))

    original = ['void main() {',
                '// This comment is missing',
                '// in the unified diff',
                'return 09;',
                '}']
    diff = ['--- a/some-file.c',
            '+++ b/some-file.c',
            '@@ -1,1 +1,2 @@',
            '-void main() {',
            '+void main()',
            '+{',
            '@@ -4,2 +5,2 @@',
            '-return 09;',
            '+ return 9;',
            ' }']
    diff_string = '\n'.join(diff)

    results = list(uut.process_output(diff_string, 'some-file.c',
                                      original))
    diffs = list(Diff.from_unified_diff(diff_string,
                                        original).split_diff())
    expected = [Result.from_values(uut, 'Inconsistency found.',
                                   'some-file.c', 1, None, 1, None,
                                   RESULT_SEVERITY.NORMAL,
                                   diffs={'some-file.c': diffs[0]}),
                Result.from_values(uut, 'Inconsistency found.',
                                   'some-file.c', 4, None, 4, None,
                                   RESULT_SEVERITY.NORMAL,
                                   diffs={'some-file.c': diffs[1]})]
    self.assertEqual(results, expected)

    # A negative diff_distance keeps the hunks as separate results.
    uut = (linter(sys.executable, output_format='unified-diff',
                  diff_distance=-1)
           (self.EmptyTestLinter)
           (self.section, None))
    results = list(uut.process_output(diff_string, 'some-file.c',
                                      original))
    self.assertEqual(len(results), 2)
def __init__(self, origin, affected_code,
             link: str,
             link_context: LINK_CONTEXT):
    """
    Create a Result describing a link found in source code.

    :param origin:        The originating bear.
    :param affected_code: SourceRanges locating the link occurrence.
    :param link:          The URL that was found.
    :param link_context:  The LINK_CONTEXT the link appeared in.
    """
    Result.__init__(self, origin,
                    'Found %s with context: %s' % (link, link_context),
                    affected_code)
    # contents mirrors what dependent bears unpack: line, url, context.
    self.contents = [affected_code[0].start.line, link, link_context]
    self.link = link
    self.link_context = link_context
def run(self, filename, file,
        file_naming_convention: str = 'snake',
        ignore_uppercase_filenames: bool = True):
    """
    Checks whether the filename follows a certain naming-convention.

    :param file_naming_convention:
        The naming-convention. Supported values are:
        - ``camel`` (``thisIsCamelCase``)
        - ``pascal`` (``ThisIsPascalCase``)
        - ``snake`` (``this_is_snake_case``)
        - ``space`` (``This Is Space Case``)
    :param ignore_uppercase_filenames:
        Whether or not to ignore fully uppercase filenames completely,
        e.g. COPYING, LICENSE etc.
    """
    head, tail = os.path.split(filename)
    filename_without_extension, extension = os.path.splitext(tail)

    try:
        new_name = self._naming_convention[file_naming_convention](
            filename_without_extension)
    except KeyError:
        self.err('Invalid file-naming-convention provided: ' +
                 file_naming_convention)
        return

    # Fully uppercase names (COPYING, LICENSE, ...) are conventional.
    if ignore_uppercase_filenames and filename_without_extension.isupper():
        return

    if new_name != filename_without_extension:
        diff = Diff(file,
                    rename=os.path.join(head, new_name + extension))
        yield Result(
            self,
            'Filename does not follow {} naming-convention.'.format(
                file_naming_convention),
            diff.affected_code(filename),
            diffs={filename: diff})
def run(self, filename, file,
        radon_ranks_info: typed_list(str) = (),
        radon_ranks_normal: typed_list(str) = ('C', 'D'),
        radon_ranks_major: typed_list(str) = ('E', 'F')):
    """
    Uses radon to compute complexity of a given file.

    :param radon_ranks_info:   The ranks (given by radon) to treat as
                               severity INFO.
    :param radon_ranks_normal: The ranks (given by radon) to treat as
                               severity NORMAL.
    :param radon_ranks_major:  The ranks (given by radon) to treat as
                               severity MAJOR.
    """
    severity_map = {
        RESULT_SEVERITY.INFO: radon_ranks_info,
        RESULT_SEVERITY.NORMAL: radon_ranks_normal,
        RESULT_SEVERITY.MAJOR: radon_ranks_major
    }
    for visitor in radon.complexity.cc_visit("".join(file)):
        rank = radon.complexity.cc_rank(visitor.complexity)
        severity = None
        for result_severity, rank_list in severity_map.items():
            if rank in rank_list:
                severity = result_severity
        # Ranks not present in any configured list are not reported.
        if severity is None:
            continue

        visitor_range = SourceRange.from_values(
            filename, visitor.lineno, visitor.col_offset, visitor.endline)
        message = "{} has a cyclomatic complexity of {}".format(
            visitor.name, rank)

        yield Result(self, message, severity=severity,
                     affected_code=(visitor_range,))
def test_print_results_multiple_ranges(self):
    """A result with several affected ranges prints each file section."""
    affected_code = (SourceRange.from_values('some_file', 5, end_line=7),
                     SourceRange.from_values('another_file', 1, 3, 1, 5),
                     SourceRange.from_values('another_file', 3, 3, 3, 5))
    with retrieve_stdout() as stdout:
        print_results(
            self.log_printer,
            Section(''),
            [Result('ClangCloneDetectionBear', 'Clone Found',
                    affected_code)],
            {abspath('some_file'): ['line ' + str(i + 1) + '\n'
                                    for i in range(10)],
             abspath('another_file'): ['line ' + str(i + 1)
                                       for i in range(10)]},
            {},
            self.console_printer)

        # NOTE(review): expected-string line breaks reconstructed from a
        # whitespace-mangled source -- confirm padding against real output.
        self.assertEqual(
            """
another_file
| 1| li{0}{1}

another_file
| 3| li{0}{2}

some_file
| 5| {3}
| 6| {4}
| 7| {5}
| | [NORMAL] ClangCloneDetectionBear:
| | {6}\n""".format(
                highlight_text(self.no_color, 'ne', self.lexer,
                               BackgroundSourceRangeStyle),
                highlight_text(self.no_color, ' 1', self.lexer),
                highlight_text(self.no_color, ' 3', self.lexer),
                highlight_text(self.no_color, 'line 5', self.lexer),
                highlight_text(self.no_color, 'line 6', self.lexer),
                highlight_text(self.no_color, 'line 7', self.lexer),
                highlight_text(self.no_color, 'Clone Found',
                               style=BackgroundMessageStyle)),
            stdout.getvalue())
def run(self, filename, file,
        use_spaces: bool=True,
        tab_width: int=SpacingHelper.DEFAULT_TAB_WIDTH,
        max_line_length: int=80,
        use_parentheses_in_import: bool=True,
        sort_imports_by_length: bool=False,
        isort_multi_line_output: int=4):
    """
    Sorts imports for python.

    :param use_spaces:                True if spaces are to be used
                                      instead of tabs.
    :param tab_width:                 Number of spaces per indent level.
    :param max_line_length:           Maximum number of characters for
                                      a line.
    :param use_parentheses_in_import: True if parenthesis are to be used
                                      in import statements.
    :param sort_imports_by_length:    Set to true to sort imports by
                                      length instead of alphabetically.
    :param isort_multi_line_output:   The type of formatting to be used by
                                      isort when indenting imports. This
                                      value if passed to isort as the
                                      `multi_line_output` setting.
    """
    # isort's `indent` takes the literal string "Tab" for tab indentation,
    # otherwise the number of spaces. (`== False` replaced by idiomatic
    # `not use_spaces` -- same truth table for the boolean setting.)
    indent = "Tab" if not use_spaces else tab_width
    new_file = SortImports(
        file_contents=''.join(file),
        line_length=max_line_length,
        indent=indent,
        multi_line_output=isort_multi_line_output,
        use_parentheses=use_parentheses_in_import,
        length_sort=sort_imports_by_length).output.splitlines(True)
    if new_file != file:
        diff = Diff.from_string_arrays(file, new_file)
        yield Result(self,
                     "Imports can be sorted.",
                     affected_code=diff.affected_code(filename),
                     diffs={filename: diff})
def run(self,
        shortlog_length: int = 50,
        body_line_length: int = 73,
        force_body: bool = False,
        allow_empty_commit_message: bool = False):
    """
    Checks the current git commit message at HEAD.

    This bear ensures that the shortlog and body do not exceed a given
    line-length and that a newline lies between them.

    :param shortlog_length:            The maximum length of the shortlog.
                                       The shortlog is the first line of
                                       the commit message. The newline
                                       character at end does not count to
                                       the length.
    :param body_line_length:           The maximum line-length of the
                                       body. The newline character at each
                                       line end does not count to the
                                       length.
    :param force_body:                 Whether a body shall exist or not.
    :param allow_empty_commit_message: Whether empty commit messages are
                                       allowed or not.
    """
    stdout, stderr = run_shell_command(self._git_command)
    if stderr:
        self.err("git:", repr(stderr))
        return

    # git automatically removes trailing whitespaces. Also we need to
    # remove the last \n printed to align the prompt onto the next line.
    stdout = stdout.splitlines()[:-1]

    # Idiomatic emptiness check instead of `len(stdout) == 0`.
    if not stdout:
        if not allow_empty_commit_message:
            yield Result(self, "HEAD commit has no message.")
        return

    yield from self.check_shortlog(shortlog_length, stdout[0])
    yield from self.check_body(body_line_length, force_body, stdout[1:])
def test_print_results_multiple_ranges(self):
    # Three ranges across two files: two short column ranges in
    # "another_file" and one full-line range (lines 5-7) in "some_file".
    affected_code = (SourceRange.from_values("some_file", 5, end_line=7),
                     SourceRange.from_values("another_file", 1, 3, 1, 5),
                     SourceRange.from_values("another_file", 3, 3, 3, 5))
    with retrieve_stdout() as stdout:
        print_results(self.log_printer,
                      Section(""),
                      [Result("ClangCloneDetectionBear",
                              "Clone Found",
                              affected_code)],
                      {abspath("some_file"): ["line " + str(i + 1) + "\n"
                                              for i in range(10)],
                       abspath("another_file"): ["line " + str(i + 1)
                                                 for i in range(10)]},
                      {},
                      color=False)
        # The expected console dump: each affected file section followed
        # by its highlighted lines, then the result message itself.
        self.assertEqual("""
another_file
|   1| li{0}{1}

another_file
|   3| li{0}{2}

some_file
|   5| {3}
|   6| {4}
|   7| {5}
|    | [NORMAL] ClangCloneDetectionBear:
|    | {6}\n""".format(highlight_text('ne', self.lexer,
                                      BackgroundSourceRangeStyle),
                       highlight_text(' 1', self.lexer),
                       highlight_text(' 3', self.lexer),
                       highlight_text('line 5', self.lexer),
                       highlight_text('line 6', self.lexer),
                       highlight_text('line 7', self.lexer),
                       highlight_text("Clone Found",
                                      style=BackgroundMessageStyle)),
                         stdout.getvalue())
def test_discarded_note(self):
    # A tiny module with a type error on line 3 of the dedented source.
    code = dedent("""
        def f() -> None:
            1 + "a"
        """).splitlines()
    with prepare_file(code, filename=None,
                      create_tempfile=True) as (file, fname):
        expected = [
            Result.from_values(
                message=(
                    'Unsupported operand types for + ("int" and "str")'),
                file=fname,
                line=3,
                origin=self.uut,
                severity=RESULT_SEVERITY.MAJOR,
            ),
        ]
        # The tempfile already exists, so the checker must not create one.
        self.check_results(self.uut, code, results=expected,
                           filename=fname, create_tempfile=False)
def test_ask_for_actions_and_apply(self):
    failed = set()
    action = TestAction()
    call_args = [self.console_printer, Section(''),
                 [action.get_metadata()], {'TestAction': action},
                 failed, Result('origin', 'message'), {}, {}, {}]

    with simulate_console_inputs('a', 'param1', 'a', 'param2') as generator:
        # First round: apply blows up, so the action must be recorded
        # as failed after consuming the action choice + its param.
        action.apply = unittest.mock.Mock(side_effect=AssertionError)
        ask_for_action_and_apply(*call_args)
        self.assertEqual(generator.last_input, 1)
        self.assertIn('TestAction', failed)

        # Second round: apply succeeds, so the failure record is cleared.
        action.apply = lambda *args, **kwargs: {}
        ask_for_action_and_apply(*call_args)
        self.assertEqual(generator.last_input, 3)
        self.assertNotIn('TestAction', failed)
def run(self, filename, file,
        max_lines_per_file: int,
        exclude_blank_lines: bool=False):
    """
    Count the number of lines in a file and ensure that they are
    smaller than a given size.

    :param max_lines_per_file:
        Number of lines allowed per file.
    :param exclude_blank_lines:
        "True" if blank lines are to be excluded.
    """
    line_count = len(file)
    if exclude_blank_lines:
        line_count -= self._get_blank_line_count(file)

    # Files within the limit produce no result at all.
    if line_count <= max_lines_per_file:
        return

    yield Result.from_values(
        origin=self,
        message=('This file had {count} lines, which is {extra} '
                 'lines more than the maximum limit specified.'
                 .format(count=line_count,
                         extra=line_count - max_lines_per_file)),
        severity=RESULT_SEVERITY.NORMAL,
        file=filename)
def run(self, filename, file, remove_all_unused_imports: bool = False):
    """
    Detects unused code. By default this functionality is limited to:

    - Unneeded pass statements.
    - Unneeded builtin imports.

    :param remove_all_unused_imports:
        True removes all unused imports - might have side effects
    """
    # Let autoflake produce the cleaned-up source as a list of lines.
    fixed = autoflake.fix_code(
        ''.join(file),
        additional_imports=None,
        remove_all_unused_imports=remove_all_unused_imports,
        remove_unused_variables=True).splitlines(True)

    # One result per contiguous changed region.
    diffs = Diff.from_string_arrays(file, fixed).split_diff()
    for diff in diffs:
        yield Result(self,
                     "This file contains unused source code.",
                     affected_code=(diff.range(filename),),
                     diffs={filename: diff})
def correct_single_line_str(self, filename, file, sourcerange,
                            preferred_quotation):
    """
    Corrects a given single line string assuming it does not use the
    preferred quotation. If the preferred quotation mark is used inside
    the string, no correction will be made.

    This function will yield one or no Result objects.

    :param filename:            The filename of the file to correct the
                                line in.
    :param file:                The file contents as list of lines.
    :param sourcerange:         The sourcerange indicating where to find
                                the string.
    :param preferred_quotation: ``'`` or ``"`` respectively.
    """
    # Extract the text between the quotes. Columns appear to be 1-based
    # and inclusive of the quote characters, so slicing from
    # start.column skips the opening quote and stopping at
    # end.column - 1 drops the closing one.
    str_contents = file[
        sourcerange.start.line - 1][
        sourcerange.start.column:sourcerange.end.column - 1]

    if preferred_quotation in str_contents:
        # Swapping quote style would require escaping; skip instead.
        return

    # Everything before the opening quote and after the closing quote.
    before = file[sourcerange.start.line - 1][:sourcerange.start.column - 1]
    after = file[sourcerange.end.line - 1][sourcerange.end.column:]
    replacement = (before +
                   preferred_quotation +
                   str_contents +
                   preferred_quotation +
                   after)

    diff = Diff(file)
    diff.change_line(sourcerange.start.line,
                     file[sourcerange.start.line - 1],
                     replacement)
    yield Result(self,
                 'You do not use the preferred quotation marks.',
                 diff.affected_code(filename),
                 diffs={filename: diff})
def run(self, filename, file, dependency_results=dict()):
    """
    Yield __future__ nodes.

    :param filename:           The name of the file
    :param file:               The content of the file
    :param dependency_results: Results from the metabear
    """
    # NOTE(review): the mutable default dict is shared between calls,
    # but it is only read (never mutated) here, so this is harmless.
    corrected_lines = set()
    for result in dependency_results.get(PyFlakesASTBear.name, []):
        # Walk all __future__ import nodes in line order.
        for node in result.get_nodes(result.module_scope,
                                     FutureImportation,
                                     key=lambda x: x.source.lineno):
            lineno = node.source.lineno
            if lineno not in corrected_lines:
                # remove_future_imports yields the diff for this line and
                # the updated set of already-handled line numbers.
                corrected, corrected_lines = self.remove_future_imports(
                    file, lineno, corrected_lines)
                yield Result.from_values(
                    origin=self,
                    message='Future import(s) found',
                    file=filename,
                    diffs={filename: corrected},
                    line=lineno)
def run(self, filename, file, cyclomatic_complexity: int = 8):
    """
    Check for all functions if they are too complicated using the
    cyclomatic complexity metric.

    You can read more about this metric at
    <https://www.wikiwand.com/en/Cyclomatic_complexity>.

    :param cyclomatic_complexity:
        Maximum cyclomatic complexity that is considered to be normal.
        The value of 10 had received substantial corroborating evidence.
        But the general recommendation: "For each module, either limit
        cyclomatic complexity to [the agreed-upon limit] or provide a
        written explanation of why the limit was exceeded."
    """
    # Parse the file with libclang and walk its AST from the root cursor.
    root = Index.create().parse(filename).cursor
    for cursor, complexity in self.complexities(root, filename):
        if complexity > cyclomatic_complexity:
            affected_code = (SourceRange.from_clang_range(cursor.extent),)
            yield Result(
                self,
                "The function '{function}' should be simplified. Its "
                "cyclomatic complexity is {complexity} which exceeds "
                "maximal recommended value "
                "of {rec_value}.".format(
                    function=cursor.displayname,
                    complexity=complexity,
                    rec_value=cyclomatic_complexity),
                affected_code=affected_code,
                additional_info=(
                    "The cyclomatic complexity is a metric that measures "
                    "how complicated a function is by counting branches "
                    "and exits of each function.\n\n"
                    "Your function seems to be complicated and should be "
                    "refactored so that it can be understood by other "
                    "people easily.\n\nSee "
                    "<http://www.wikiwand.com/en/Cyclomatic_complexity>"
                    " for more information."))
def test_autoapply_override(self):
    """
    Tests that the default_actions aren't automatically applied when the
    autoapply setting overrides that.
    """
    self.section.append(
        Setting('default_actions', 'somebear: PrintDebugMessageAction'))

    # Verify that it would apply the action, i.e. remove the result
    results = [5,
               HiddenResult('origin', []),
               Result('somebear', 'message', debug_msg='debug')]
    retval, newres = print_result(results,
                                  {},
                                  0,
                                  lambda *args: None,
                                  self.section,
                                  self.log_printer,
                                  {},
                                  [],
                                  console_printer=self.console_printer)
    self.assertEqual(newres, [])
def test_apply(self):
    with prepare_file(['fixme '], None) as (lines, filename):
        dir_path = os.path.dirname(filename)
        file_path = os.path.basename(filename)
        newfilename = os.path.join(dir_path, file_path + '.py')
        # Give the tempfile a .py extension so the patch action is
        # offered with the python-specific bear.
        os.rename(filename, newfilename)

        file_dict = {newfilename: ['fixme ']}
        diff_dict = {newfilename: Diff(file_dict[newfilename])}
        diff_dict[newfilename].add_line(1, ['test\n'])
        test_result = Result('origin', 'message', diffs=diff_dict)
        section = Section('name')
        section.append(Setting('no_color', 'True'))

        # Inputs: choose action 1, confirm, then 0 to finish.
        with simulate_console_inputs('1', 'True', '0') as generator:
            with retrieve_stdout() as stdout:
                self.uut.apply_from_section(test_result, file_dict,
                                            {}, section)
                self.assertIn('[ ] *0. Do Nothing\n'
                              '[ ] 1. Apply patch '
                              '(\'SpaceConsistencyBear\')\n'
                              '[ ]',
                              stdout.getvalue())

        # Restore the original name so prepare_file's cleanup works.
        os.rename(newfilename, filename)
def test_string_dict(self):
    # An empty result serializes to all-default string fields.
    uut = Result(None, "")
    output = uut.to_string_dict()
    self.assertEqual(output,
                     {"id": str(uut.id),
                      "origin": "",
                      "message": "",
                      "file": "",
                      "line_nr": "",
                      "severity": "NORMAL",
                      "debug_msg": "",
                      "additional_info": "",
                      "confidence": "100"})

    # A fully populated result keeps each value, stringified; the file
    # path is made absolute.
    uut = Result.from_values(origin="origin",
                             message="msg",
                             file="file",
                             line=2,
                             severity=RESULT_SEVERITY.INFO,
                             additional_info="hi!",
                             debug_msg="dbg",
                             confidence=50)
    output = uut.to_string_dict()
    self.assertEqual(output,
                     {"id": str(uut.id),
                      "origin": "origin",
                      "message": "msg",
                      "file": abspath("file"),
                      "line_nr": "2",
                      "severity": "INFO",
                      "debug_msg": "dbg",
                      "additional_info": "hi!",
                      "confidence": "50"})

    # line_nr mirrors the given line number as a string.
    uut = Result.from_values(origin="o", message="m", file="f", line=5)
    output = uut.to_string_dict()
    self.assertEqual(output["line_nr"], "5")
def test_string_dict(self):
    # An empty result serializes to all-default string fields.
    uut = Result(None, '')
    expected = {'id': str(uut.id),
                'origin': '',
                'message': '',
                'file': '',
                'line_nr': '',
                'severity': 'NORMAL',
                'debug_msg': '',
                'additional_info': '',
                'confidence': '100'}
    self.assertEqual(uut.to_string_dict(), expected)

    # A fully populated result keeps every value, stringified; the file
    # path is made absolute.
    uut = Result.from_values(origin='origin',
                             message='msg',
                             file='file',
                             line=2,
                             severity=RESULT_SEVERITY.INFO,
                             additional_info='hi!',
                             debug_msg='dbg',
                             confidence=50)
    expected = {'id': str(uut.id),
                'origin': 'origin',
                'message': 'msg',
                'file': abspath('file'),
                'line_nr': '2',
                'severity': 'INFO',
                'debug_msg': 'dbg',
                'additional_info': 'hi!',
                'confidence': '50'}
    self.assertEqual(uut.to_string_dict(), expected)

    # line_nr mirrors the given line number as a string.
    uut = Result.from_values(origin='o', message='m', file='f', line=5)
    self.assertEqual(uut.to_string_dict()['line_nr'], '5')
def run(self, allow_empty_commit_message: bool = False, **kwargs):
    """
    Check the current git commit message at HEAD.

    This bear ensures automatically that the shortlog and body do not
    exceed a given line-length and that a newline lies between them.

    :param allow_empty_commit_message: Whether empty commit messages are
                                       allowed or not.
    """
    (stdout, stderr) = self.get_head_commit()

    if stderr:
        # NOTE(review): LANGUAGES looks like an unordered collection;
        # taking its first element assumes a single entry — confirm.
        vcs_name = list(self.LANGUAGES)[0].lower()+':'
        self.err(vcs_name, repr(stderr))
        return

    # Split the message into shortlog (first line) and body (the rest).
    stdout = stdout.rstrip('\n')
    pos = stdout.find('\n')
    shortlog = stdout[:pos] if pos != -1 else stdout
    body = stdout[pos+1:] if pos != -1 else ''

    if len(stdout) == 0:
        if not allow_empty_commit_message:
            yield Result(self, 'HEAD commit has no message.')
        return

    # Forward only the settings each checker declares in its metadata.
    yield from self.check_shortlog(
        shortlog,
        **self.get_shortlog_checks_metadata().filter_parameters(kwargs))
    yield from self.check_body(
        body,
        **self.get_body_checks_metadata().filter_parameters(kwargs))
    yield from self.check_issue_reference(
        body,
        **self.get_issue_checks_metadata().filter_parameters(kwargs))
def run(self, filename, file,
        max_line_length: int = 79,
        indent_size: int = SpacingHelper.DEFAULT_TAB_WIDTH,
        pep_ignore: typed_list(str) = (),
        pep_select: typed_list(str) = (),
        local_pep8_config: bool = False):
    """
    Detects and fixes PEP8 incompliant code. This bear will not change
    functionality of the code in any way.

    :param max_line_length:   Maximum number of characters for a line.
    :param indent_size:       Number of spaces per indentation level.
    :param pep_ignore:        A list of errors/warnings to ignore.
    :param pep_select:        A list of errors/warnings to exclusively
                              apply.
    :param local_pep8_config: Set to true if autopep8 should use a config
                              file as if run normally from this directory.
    """
    autopep8_options = {'ignore': pep_ignore,
                        'select': pep_select,
                        'max_line_length': max_line_length,
                        'indent_size': indent_size}

    fixed_code = autopep8.fix_code(''.join(file),
                                   apply_config=local_pep8_config,
                                   options=autopep8_options)

    # Yield one result per contiguous changed region.
    for diff in Diff.from_string_arrays(
            file, fixed_code.splitlines(True)).split_diff():
        yield Result(self,
                     'The code does not comply to PEP8.',
                     affected_code=(diff.range(filename),),
                     diffs={filename: diff})
def run(self, allow_empty_commit_message: bool = False, **kwargs):
    """
    Check the current git commit message at HEAD.

    This bear ensures automatically that the shortlog and body do not
    exceed a given line-length and that a newline lies between them.

    :param allow_empty_commit_message: Whether empty commit messages are
                                       allowed or not.
    """
    # Run git from the project's config directory (or the cwd) so the
    # right repository's HEAD is inspected.
    with change_directory(self.get_config_dir() or os.getcwd()):
        stdout, stderr = run_shell_command('git log -1 --pretty=%B')

    if stderr:
        self.err('git:', repr(stderr))
        return

    # Split the message into shortlog (first line) and body (the rest).
    stdout = stdout.rstrip('\n')
    pos = stdout.find('\n')
    shortlog = stdout[:pos] if pos != -1 else stdout
    body = stdout[pos+1:] if pos != -1 else ''

    if len(stdout) == 0:
        if not allow_empty_commit_message:
            yield Result(self, 'HEAD commit has no message.')
        return

    # Forward only the settings each checker declares in its metadata.
    yield from self.check_shortlog(
        shortlog,
        **self.get_shortlog_checks_metadata().filter_parameters(kwargs))
    yield from self.check_body(
        body,
        **self.get_body_checks_metadata().filter_parameters(kwargs))
    yield from self.check_issue_reference(
        body,
        **self.get_issue_checks_metadata().filter_parameters(kwargs))
def check_for_variable_spacing_issues(self, file, filename, line,
                                      line_number, variable_spacing):
    """
    Checks any variable in the given line for spacing issues.

    Yields a Result for each issue found.

    :param file:             The content of the file currently being
                             inspected.
    :param filename:         The name of the file currently being
                             inspected.
    :param line:             The content of the line currently being
                             inspected.
    :param line_number:      The current line number.
    :param variable_spacing: The number of spaces required on each side
                             of a variable tag.
    """
    for match in self.VARIABLE_REGEX.finditer(line):
        # Correctly spaced tags need no result.
        if has_required_spacing(match.group('content'), variable_spacing):
            continue

        diff = generate_spacing_diff(file, filename, line,
                                     line_number, match, variable_spacing)
        yield Result.from_values(
            origin=self,
            message='Variable blocks should be spaced with '
                    '`{0}` spaces on each side.'.format(variable_spacing),
            file=filename,
            line=line_number,
            column=match.start(0) + 1,
            end_line=line_number,
            end_column=match.end(0) + 1,
            diffs=diff)
def process_output_corrected(self, output, filename, file,
                             diff_severity=RESULT_SEVERITY.NORMAL,
                             result_message='Inconsistency found.',
                             diff_distance=1):
    """
    Processes the executable's output as a corrected file.

    :param output:         The output of the program as a string.
    :param filename:       The filename of the file currently being
                           corrected.
    :param file:           The contents of the file currently being
                           corrected.
    :param diff_severity:  The severity to use for generating results.
    :param result_message: The message to use for generating results.
    :param diff_distance:  Number of unchanged lines that are allowed
                           in between two changed lines so they get
                           yielded as one diff. If a negative distance
                           is given, every change will be yielded as an
                           own diff, even if they are right beneath
                           each other.
    :return:               An iterator returning results containing
                           patches for the file to correct.
    """
    corrected = output.splitlines(keepends=True)
    diffs = Diff.from_string_arrays(file, corrected).split_diff(
        distance=diff_distance)
    for diff in diffs:
        yield Result(self,
                     result_message,
                     affected_code=diff.affected_code(filename),
                     diffs={filename: diff},
                     severity=diff_severity)
def process_output(self, output, filename, file):
    # output is a (stdout, stderr) pair; surface any stderr as warnings.
    if output[1]:
        self.warn('While running {0}, some issues were found:'.format(
            self.__class__.__name__))
        self.warn(output[1])

    if not file or not output[0]:
        return

    # The linter emits a JSON array with one entry per checked file.
    output = json.loads(output[0])
    lines = ''.join(file)

    assert len(output) == 1

    for result in output[0]['messages']:
        if 'fix' not in result:
            diffs = None
        else:
            # A fix is a character-offset range into the whole file plus
            # replacement text; splice it in and diff against the
            # original to build a patch.
            fix = result['fix']
            start, end = fix['range']
            replacement_text = fix['text']
            new_output = lines[:start] + replacement_text + lines[end:]
            diffs = {
                filename: Diff.from_string_arrays(lines.splitlines(True),
                                                  new_output.splitlines(True))
            }

        # Attribute the result to the specific rule when one is given,
        # otherwise to the bear itself.
        origin = ('{class_name} ({rule})'.format(
            class_name=type(self).__name__,
            rule=result['ruleId']) if result['ruleId'] is not None
            else self)

        yield Result.from_values(
            origin=origin,
            message=result['message'],
            file=filename,
            line=result['line'],
            diffs=diffs,
            severity=self.severity_map[result['severity']])
def test_print_result_no_input(self):
    with make_temp() as testfile_path:
        file_dict = {testfile_path: ['1\n', '2\n', '3\n']}
        diff = Diff(file_dict[testfile_path])
        diff.delete_line(2)
        diff.change_line(3, '3\n', '3_changed\n')
        with simulate_console_inputs(1, 2, 3) as generator, \
                retrieve_stdout() as stdout:
            # Force the patch action to be applicable regardless of
            # the result's content.
            ApplyPatchAction.is_applicable = staticmethod(
                lambda *args: True)
            print_results_no_input(
                self.log_printer,
                Section('someSection'),
                [Result('origin', 'message', diffs={testfile_path: diff})],
                file_dict,
                self.file_diff_dict,
                self.console_printer)
            # No interaction happens, so no console input is consumed.
            self.assertEqual(generator.last_input, -1)
            self.assertEqual(stdout.getvalue(), """
Project wide:

**** origin [Section: someSection | Severity: NORMAL] ****

! ! {}\n""".format(highlight_text(self.no_color, 'message',
                                  style=BackgroundMessageStyle)))
def run(self, filename, file,
        json_sort: bool=False,
        tab_width: int=SpacingHelper.DEFAULT_TAB_WIDTH):
    """
    Raises issues for any deviations from the pretty-printed JSON.

    :param json_sort:  Whether or not keys should be sorted.
    :param tab_width:  Number of spaces to indent.
    """
    try:
        # Delegate formatting checks; re-yield every produced result.
        yield from self.retrieve_results(filename, file,
                                         json_sort=json_sort,
                                         indent=tab_width)
    except self.DecodeError as err:
        # Unparsable JSON gets a single result with the parser message.
        yield Result.from_values(
            self,
            "This file does not contain parsable JSON. '{adv_msg}'"
            .format(adv_msg=str(err)),
            file=filename)
def match_to_result(self, match, filename):
    """
    Converts a regex match's groups into a result.

    :param match:    The match got from regex parsing.
    :param filename: The name of the file from which this match is got.
    """
    groups = self._get_groupdict(match)

    # Normalize the positional groups to integers before building
    # the result.
    for key in ("line", "column", "end_line", "end_column"):
        if groups.get(key):
            groups[key] = int(groups[key])

    return Result.from_values(
        origin=self,
        message=groups.get("message", ""),
        file=filename,
        severity=int(groups.get("severity", RESULT_SEVERITY.NORMAL)),
        line=groups.get("line", None),
        column=groups.get("column", None),
        end_line=groups.get("end_line", None),
        end_column=groups.get("end_column", None))
def test_print_result_no_input(self):
    with make_temp() as testfile_path:
        file_dict = {testfile_path: ["1\n", "2\n", "3\n"]}
        diff = Diff(file_dict[testfile_path])
        diff.delete_line(2)
        diff.change_line(3, "3\n", "3_changed\n")
        with simulate_console_inputs(1, 2, 3) as generator, \
                retrieve_stdout() as stdout:
            # Force the patch action to be applicable regardless of
            # the result's content.
            ApplyPatchAction.is_applicable = staticmethod(
                lambda *args: True)
            print_results_no_input(
                self.log_printer,
                Section("someSection"),
                [Result("origin", "message", diffs={testfile_path: diff})],
                file_dict,
                self.file_diff_dict,
                color=False)
            # No interaction happens, so no console input is consumed.
            self.assertEqual(generator.last_input, -1)
            self.assertEqual(stdout.getvalue(), """
Project wide:
| | [NORMAL] origin:
| | {}\n""".format(highlight_text("message",
                                  style=BackgroundMessageStyle)))
def check_for_statement_spacing_issues(self, file, filename, line,
                                       line_number, statement_spacing):
    """
    Checks any statement in the given line for spacing issues.

    :param file:              The content of the file currently being
                              inspected.
    :param filename:          The name of the file currently being
                              inspected.
    :param line:              The content of the line currently being
                              inspected.
    :param line_number:       The current line number.
    :param statement_spacing: The number of spaces required on each side
                              of a statement tag.
    """
    # Only badly spaced statement tags produce results.
    bad_matches = (m for m in self.STATEMENT_REGEX.finditer(line)
                   if not has_required_spacing(m.group('content'),
                                               statement_spacing))
    for m in bad_matches:
        diff = generate_spacing_diff(file, filename, line,
                                     line_number, m, statement_spacing)
        yield Result.from_values(
            origin=self,
            message='Statement block not spaced with '
                    '{} spaces on each side.'.format(statement_spacing),
            file=filename,
            line=line_number,
            column=m.start() + 1,
            end_line=line_number,
            end_column=m.end() + 1,
            diffs=diff)
def check_keywords(self, filename, file, comments, regex, message):
    """
    Checks for the presence of keywords according to regex in a
    given file.

    :param regex:   A regular expression which is used to search matching
                    keywords in a file.
    :param message: A message to be displayed to the user when a keyword
                    in a given file results in a match. It may have an
                    unnamed placeholder for the keyword.
    """
    for line_number, line in enumerate(file, start=1):
        for match in regex.finditer(line):
            # Build a removal patch for the matched keyword's comment.
            patch = generate_diff(comments, file, filename,
                                  line, line_number, match.start())
            yield Result.from_values(
                origin=self,
                message=message.format(match.group()),
                file=filename,
                line=line_number,
                column=match.start() + 1,
                end_line=line_number,
                end_column=match.end() + 1,
                severity=RESULT_SEVERITY.INFO,
                diffs=patch)
def test_global_linter_bear_use_stderr(self):
    create_arguments_mock = Mock()

    class Handler:

        @staticmethod
        def create_arguments(config_file):
            create_arguments_mock(config_file)
            return ['MAJOR: Test Message\nasd']

    # Build a global linter bear around `echo` that parses stderr with
    # the regex output format, then instantiate it.
    bear_class = linter('echo',
                        global_bear=True,
                        output_format='regex',
                        use_stderr=True,
                        output_regex=self.test_program_regex,
                        severity_map=self.test_program_severity_map)(Handler)
    uut = bear_class({}, self.section, None)

    self.assertEqual(
        list(uut.run()),
        [Result(uut, 'Test Message', severity=RESULT_SEVERITY.MAJOR)])
    create_arguments_mock.assert_called_once_with(None)