def test_comment_info(self) -> None:
    """CommentInfo.compute separates own-line comments from trailing comments."""
    # A comment on a line with no other leading tokens is a "comment on own line".
    # In contrast, trailing comments come after other tokens on the same line.
    code = dedent_with_lstrip(
        """
        # comment on own line
        # this is a
        # multiline comment
        def fn():
            # comment on own line
            fn2()
            fn3()  # trailing comment
            fn4()
        # comment on own line
        """
    )
    tokens = tokenize.tokenize(BytesIO(code.encode("utf-8")).readline)
    result = CommentInfo.compute(tokens=tokens)
    # The set of all comments includes both comments on their own line and trailing
    # comments. Line 7 holds the only trailing comment (after `fn3()`).
    self.assertEqual([tok.start[0] for tok in result.comments], [1, 2, 3, 5, 7, 9])
    # `comments_on_own_line` is a subset of all comments
    self.assertEqual(
        [tok.start[0] for tok in result.comments_on_own_line], [1, 2, 3, 5, 9]
    )
def test_compose_new_comment_multiline(self) -> None:
    """_compose_new_comment drops unneeded codes from a two-line lint-fixme comment.

    NOTE(review): this method is a near-duplicate of
    RemoveUnusedSuppressionsRuleTest.test_compose_new_comment_multiline later in
    the file, differing only by passing ``use_noqa=False`` here — confirm whether
    both copies are intentional.
    """
    source = dedent_with_lstrip(
        """
        # lint-fixme: UsedRule, UsedRule2: reason...
        # lint: reason continued
        """
    )
    tokens = _get_tokens(source.encode())
    ignore_info = IgnoreInfo.compute(
        comment_info=CommentInfo.compute(tokens=tokens),
        line_mapping_info=LineMappingInfo.compute(tokens=tokens),
        use_noqa=False,
    )
    local_suppression_comments = (
        ignore_info.local_ignore_info.local_suppression_comments_by_line[1]
    )
    self.assertEqual(len(local_suppression_comments), 1)
    local_suppression_comment = local_suppression_comments[0]

    # First code unneeded: the remaining code keeps the (re-wrapped) reason text.
    unneeded_codes = ["UsedRule"]
    new_comment_lines = _compose_new_comment(
        local_suppression_comment, unneeded_codes, 1
    )
    expected_new_lines = [
        "# lint-fixme: UsedRule2: reason... reason",
        "# lint: continued",
    ]
    self.assertEqual(new_comment_lines, expected_new_lines)

    # Second code unneeded.
    unneeded_codes = ["UsedRule2"]
    new_comment_lines = _compose_new_comment(
        local_suppression_comment, unneeded_codes, 1
    )
    expected_new_lines = [
        "# lint-fixme: UsedRule: reason... reason",
        "# lint: continued",
    ]
    self.assertEqual(new_comment_lines, expected_new_lines)

    # Both codes unneeded: the whole comment is removed.
    unneeded_codes = ["UsedRule", "UsedRule2"]
    new_comment_lines = _compose_new_comment(
        local_suppression_comment, unneeded_codes, 1
    )
    expected_new_lines = []
    self.assertEqual(new_comment_lines, expected_new_lines)

    # Both codes needed: the comment is left unchanged.
    unneeded_codes = []
    new_comment_lines = _compose_new_comment(
        local_suppression_comment, unneeded_codes, 1
    )
    expected_new_lines = [
        "# lint-fixme: UsedRule, UsedRule2: reason...",
        "# lint: reason continued",
    ]
    self.assertEqual(new_comment_lines, expected_new_lines)
class LineMappingInfoTest(UnitTest):
    """Tests for LineMappingInfo: physical-to-logical line mapping of source code."""

    @data_provider(
        {
            # One statement per line: every physical line is its own logical line.
            "simple": {
                "code": dedent_with_lstrip(
                    """
                    def fn():
                        ...
                    """
                ),
                "physical_to_logical": {1: 1, 2: 2, 3: 3},
                "next_non_empty_logical_line": {1: 1, 2: 2, 3: 3},
            },
            # Comment lines map to themselves, but the next *non-empty* logical
            # line skips past them to the next code line.
            "comments": {
                "code": dedent_with_lstrip(
                    """
                    # comment with
                    # multiple
                    # lines
                    def fn():
                        # comment
                        ...
                    """
                ),
                "physical_to_logical": {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7},
                "next_non_empty_logical_line": {
                    1: 4,
                    2: 4,
                    3: 4,
                    4: 4,
                    5: 6,
                    6: 6,
                    7: 7,
                },
            },
            # Blank lines behave like comments for next-non-empty lookups.
            "blank_lines": {
                "code": dedent_with_lstrip(
                    """


                    def fn():


                        ...
                    """
                ),
                "physical_to_logical": {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7},
                "next_non_empty_logical_line": {
                    1: 3,
                    2: 3,
                    3: 3,
                    4: 6,
                    5: 6,
                    6: 6,
                    7: 7,
                },
            },
            # Backslash continuations: physical lines 2-6 all belong to the
            # logical line that starts on physical line 2.
            "line_continuation": {
                "code": dedent_with_lstrip(
                    """
                    value = "abc"
                    value = \\
                        "abcd" + \\
                        "efgh" + \\
                        "ijkl" + \\
                        "mnop"
                    """
                ),
                "physical_to_logical": {1: 1, 2: 2, 3: 2, 4: 2, 5: 2, 6: 2, 7: 7},
                "next_non_empty_logical_line": {
                    1: 1,
                    2: 2,
                    3: 7,
                    4: 7,
                    5: 7,
                    6: 7,
                    7: 7,
                },
            },
            # A multiline string is a single logical line spanning lines 2-7.
            "multiline_string": {
                "code": dedent_with_lstrip(
                    """
                    value = "abc"
                    value = '''
                    abcd
                    efgh
                    ijkl
                    mnop
                    '''
                    """
                ),
                "physical_to_logical": {
                    1: 1,
                    2: 2,
                    3: 2,
                    4: 2,
                    5: 2,
                    6: 2,
                    7: 2,
                    8: 8,
                },
                "next_non_empty_logical_line": {
                    1: 1,
                    2: 2,
                    3: 8,
                    4: 8,
                    5: 8,
                    6: 8,
                    7: 8,
                    8: 8,
                },
            },
        }
    )
    def test_line_mapping(
        self,
        *,
        code: str,
        physical_to_logical: Mapping[int, int],
        next_non_empty_logical_line: Mapping[int, int],
    ) -> None:
        """Verify both the physical->logical map and next-non-empty lookups."""
        tokens = tokenize.tokenize(BytesIO(code.encode("utf-8")).readline)
        result = LineMappingInfo.compute(tokens=tokens)
        self.assertEqual(dict(result.physical_to_logical), dict(physical_to_logical))
        for input, expected in next_non_empty_logical_line.items():
            self.assertEqual(result.get_next_non_empty_logical_line(input), expected)
class IgnoreInfoTest(UnitTest):
    """Tests for IgnoreInfo: which lines each suppression comment silences,
    and tracking of used vs. unused suppression comments."""

    @data_provider(
        {
            # A noqa comment can be used without a specified code, which will ignore all
            # lint errors on that line. This is a bad practice, but we have to support
            # it for compatibility with existing code. (until we can codemod it away)
            "noqa_all_legacy": {
                "source": dedent_with_lstrip(
                    """
                    fn1()
                    fn2()  # noqa
                    fn3()
                    """
                ),
                "ignored_code": "IgnoredRule",
                "ignored_lines": [2],
            },
            # When a noqa comment is specified with codes, it should only ignore the
            # specified codes.
            "noqa_with_code": {
                "source": dedent_with_lstrip(
                    """
                    fn1()  # noqa: IgnoredRule
                    fn2()  # noqa: IgnoredRule: Message
                    fn3()  # noqa: IgnoredRule, Ignored2Rule: Message
                    fn4()  # noqa: Ignored1Rule
                    fn5()  # noqa: Ignored1Rule, Ignored2Rule
                    fn6()  # noqa: Ignored1Rule, Ignored2Rule: Message
                    """
                ),
                "ignored_code": "Ignored1Rule",
                "ignored_lines": [4, 5, 6],
            },
            # A noqa on the last physical line of a multi-line logical line
            # suppresses the whole logical line (lines 1-2 and 6-9 here).
            # NOTE(review): the two blank lines before fn3 are required so the
            # asserted ignored_lines 6-9 line up — confirm against the original.
            "noqa_multiline": {
                "source": dedent_with_lstrip(
                    """
                    fn1(line, \\
                        continuation)  # noqa: IgnoredRule
                    fn2()


                    fn3('''
                    multiline
                    string
                    ''')  # noqa: IgnoredRule
                    """
                ),
                "ignored_code": "IgnoredRule",
                "ignored_lines": [1, 2, 6, 7, 8, 9],
            },
            # noqa-file with a code and a reason ignores the entire file.
            "noqa_file": {
                "source": dedent_with_lstrip(
                    """
                    # noqa-file: IgnoredRule: Some reason
                    fn1()
                    """
                ),
                "ignored_code": "IgnoredRule",
                "ignored_lines": [1, 2, 3],
            },
            "noqa_file_multiple_codes": {
                "source": dedent_with_lstrip(
                    """
                    # noqa-file: IgnoredRule, Ignored1Rule, Ignored2Rule: Some reason
                    fn1()
                    """
                ),
                "ignored_code": "Ignored1Rule",
                "ignored_lines": [1, 2, 3],
            },
            "noqa_file_requires_code_and_reason": {
                "source": dedent_with_lstrip(
                    """
                    # noqa-file
                    # noqa-file: IgnoredRule
                    # Neither of these noqa-files should work because they're incomplete
                    fn1()
                    """
                ),
                "ignored_code": "IgnoredRule",
                "ignored_lines": [],
            },
            # Legacy numeric codes (e.g. IG00) may appear alongside rule names;
            # either form should match.
            "backwards_compatibility_classname": {
                "source": dedent_with_lstrip(
                    """
                    fn1()  # noqa: IG00, IgnoredRule
                    """
                ),
                "ignored_code": "IgnoredRule",
                "ignored_lines": [1],
            },
            "backwards_compatibility_oldcode": {
                "source": dedent_with_lstrip(
                    """
                    fn1()  # noqa: IG00, IgnoredRule
                    """
                ),
                "ignored_code": "IG00",
                "ignored_lines": [1],
            },
            # lint-fixme comments ignore the comment's own lines plus the next
            # logical line's first physical line(s) as asserted below.
            "lint_fixme": {
                "source": dedent_with_lstrip(
                    """
                    fn1()

                    # lint-fixme: IgnoredRule: Some short reason
                    fn2(  # this line should be ignored
                        "multiple",  # but these lines shouldn't
                        "arguments",
                    )

                    # lint-fixme: IgnoredRule: Some reason spanning
                    # lint: multiple lines because it's long.
                    fn3('''
                    multiline
                    string
                    ''')  # this function call is a single logical line

                    fn4()
                    """
                ),
                "ignored_code": "IgnoredRule",
                "ignored_lines": [3, 4, 9, 10, 11, 12, 13, 14],
            },
            "lint_ignore": {
                "source": dedent_with_lstrip(
                    """
                    fn1()

                    # lint-ignore: IgnoredRule: Some reason
                    fn2()

                    fn3()
                    """
                ),
                "ignored_code": "IgnoredRule",
                "ignored_lines": [3, 4],
            },
            # A lint-ignore can exist right before an EOF. That's fine. We should ignore
            # all the way to the EOF.
            "lint_ignore_eof": {
                "source": dedent_with_lstrip(
                    """
                    # lint-ignore: IgnoredRule
                    """
                ),
                "ignored_code": "IgnoredRule",
                "ignored_lines": [1, 2],
            },
        }
    )
    def test_ignored_lines(
        self, *, source: str, ignored_code: str, ignored_lines: Container[int]
    ) -> None:
        """Report `ignored_code` on every line; only `ignored_lines` are ignored."""
        tokens = tuple(tokenize.tokenize(BytesIO(source.encode("utf-8")).readline))
        ignore_info = IgnoreInfo.compute(
            comment_info=CommentInfo.compute(tokens=tokens),
            line_mapping_info=LineMappingInfo.compute(tokens=tokens),
        )
        lines = range(1, tokens[-1].end[0] + 1)
        actual_ignored_lines = []
        for line in lines:
            ignored = ignore_info.should_ignore_report(
                CstLintRuleReport(
                    file_path=Path("fake/path.py"),
                    node=cst.EmptyLine(),
                    code=ignored_code,
                    message="message",
                    line=line,
                    column=0,
                    module=cst.MetadataWrapper(cst.parse_module(source)),
                    module_bytes=source.encode("utf-8"),
                )
            )
            if ignored:
                actual_ignored_lines.append(line)
        # pyre-fixme[6]: Expected `Iterable[Variable[_T]]` for 1st param but got
        #  `Container[int]`.
        self.assertEqual(actual_ignored_lines, list(ignored_lines))

    @data_provider(
        {
            "unused_noqa": {
                "source": "fn() # noqa",
                "reports_on_lines": [],
                "unused_comments": [1],
            },
            "used_noqa": {
                "source": "fn() # noqa",
                "reports_on_lines": [(1, "Ignored999Rule")],
                "unused_comments": [],
            },
            "unused_lint_ignore": {
                "source": "# lint-ignore: Ignored999Rule: Some reason\nfn()",
                "reports_on_lines": [],
                "unused_comments": [1],
            },
            "unused_lint_ignore_mutliple_lines": {
                "source": "# lint-ignore: Ignored999Rule: Some\n# lint: reason blah blah blah\nfn()",
                "reports_on_lines": [],
                "unused_comments": [1],
            },
            "used_lint_ignore": {
                "source": "# lint-ignore: Ignored999Rule: Some reason\nfn()",
                "reports_on_lines": [(2, "Ignored999Rule")],
                "unused_comments": [],
            },
            "used_lint_ignore_multiple_lines": {
                "source": "# lint-ignore: Ignored999Rule: Some\n# lint: reason blah blah blah\nfn()",
                "reports_on_lines": [(3, "Ignored999Rule")],
                "unused_comments": [],
            },
            # When both a lint-ignore and a noqa could suppress the same report,
            # the lint-ignore wins, leaving the noqa unused.
            "lint_ignore_is_used_before_noqa": {
                "source": "# lint-ignore: Ignored999Rule: Some reason\nfn() # noqa",
                "reports_on_lines": [(2, "Ignored999Rule")],
                "unused_comments": [2],
            },
            # Only the first of two identical lint-ignores is marked used.
            "duplicate_lint_ignores": {
                "source": "# lint-ignore: Ignored999Rule: First\n# lint-ignore: Ignored999Rule: Second\nfn()",
                "reports_on_lines": [(3, "Ignored999Rule")],
                "unused_comments": [2],
            },
            "multiple_used_lint_ignores": {
                "source": dedent_with_lstrip(
                    """
                    # lint-ignore: Ignored999Rule: Some
                    # lint: reason blah blah blah
                    # lint-ignore: Ignored1000Rule: Some
                    # lint: other reason blah blah
                    fn()
                    """
                ),
                "reports_on_lines": [(5, "Ignored999Rule"), (5, "Ignored1000Rule")],
                "unused_comments": [],
            },
            "multiple_unused_lint_ignores": {
                "source": dedent_with_lstrip(
                    """
                    # lint-ignore: Ignored999Rule: Some
                    # lint: reason blah blah blah
                    # lint-ignore: Ignored1000Rule: Some
                    # lint: other reason blah blah
                    fn()
                    """
                ),
                "reports_on_lines": [],
                "unused_comments": [1, 3],
            },
            "some_unused_lint_ignores": {
                "source": dedent_with_lstrip(
                    """
                    # lint-ignore: Ignored999Rule: Some
                    # lint: reason blah blah blah
                    # lint-ignore: Ignored1000Rule: Some
                    # lint: other reason blah blah
                    fn()
                    """
                ),
                "reports_on_lines": [(5, "Ignored999Rule")],
                "unused_comments": [3],
            },
            "some_unused_lint_ignores_2": {
                "source": dedent_with_lstrip(
                    """
                    # lint-ignore: Ignored999Rule: Some
                    # lint: reason blah blah blah
                    # lint-ignore: Ignored1000Rule: Some
                    # lint: other reason blah blah
                    fn()
                    """
                ),
                "reports_on_lines": [(5, "Ignored1000Rule")],
                "unused_comments": [1],
            },
        }
    )
    def test_unused_comments(
        self,
        *,
        source: str,
        reports_on_lines: Iterable[Tuple[int, str]],
        unused_comments: Iterable[int],
    ) -> None:
        """
        Verify that we can correctly track which lint comments were used and
        which were unused.

        TODO: We don't track usage of global ignore comments, so we can't know
        if they're unused.
        """
        tokens = tuple(tokenize.tokenize(BytesIO(source.encode("utf-8")).readline))
        ignore_info = IgnoreInfo.compute(
            comment_info=CommentInfo.compute(tokens=tokens),
            line_mapping_info=LineMappingInfo.compute(tokens=tokens),
        )
        # Feeding each report through should_ignore_report marks matching
        # suppression comments as used.
        for line, code in reports_on_lines:
            ignore_info.should_ignore_report(
                CstLintRuleReport(
                    file_path=Path("fake/path.py"),
                    node=cst.EmptyLine(),
                    code=code,
                    message="message",
                    line=line,
                    column=0,
                    module=cst.MetadataWrapper(cst.parse_module(source)),
                    module_bytes=source.encode("utf-8"),
                )
            )
        # Identify each unused comment by its first token's physical line.
        self.assertEqual(
            sorted(
                [
                    min(tok.start[0] for tok in c.tokens)
                    for c in ignore_info.suppression_comments
                    if not c.used_by
                ]
            ),
            sorted(unused_comments),
        )

    def test_multiline_suppression(self) -> None:
        """A multi-line lint-ignore yields a single comment object whose reason
        joins all continuation lines, shared by every line it covers."""
        source = """
# lint-ignore: SomeCode: some reason
# lint: and some reason continued
# lint: onto multiple lines.
x = "Some ignored violation"
"""
        tokens = tuple(tokenize.tokenize(BytesIO(source.encode("utf-8")).readline))
        ignore_info = IgnoreInfo.compute(
            comment_info=CommentInfo.compute(tokens=tokens),
            line_mapping_info=LineMappingInfo.compute(tokens=tokens),
        )
        self.assertEqual(
            len(ignore_info.local_ignore_info.local_suppression_comments), 1
        )
        local_supp_comment = next(
            lsc for lsc in ignore_info.local_ignore_info.local_suppression_comments
        )
        self.assertEqual(
            local_supp_comment.reason,
            "some reason and some reason continued onto multiple lines.",
        )

        # Verify that all local suppression comment lines map to the same SuppressionComment instance.
        for (
            lines,
            supp_comments,
        ) in ignore_info.local_ignore_info.local_suppression_comments_by_line.items():
            self.assertEqual(len(supp_comments), 1)
            supp_comment = supp_comments[0]
            self.assertIs(supp_comment, local_supp_comment)
class RemoveUnusedSuppressionsRuleTest(UnitTest):
    """Tests for RemoveUnusedSuppressionsRule: reporting and auto-fixing
    suppression comments whose codes never fired during a lint run."""

    @data_provider(
        {
            # Fully-used suppressions produce no reports.
            "used_suppression_one_code_oneline": {
                "source": dedent_with_lstrip(
                    """
                    # lint-ignore: UsedRule
                    foo = bar
                    """
                ).encode(),
                "rules_in_lint_run": [UsedRule],
                "rules_without_report": [],
                "suppressed_line": 2,
                "expected_unused_suppressions_report_messages": [],
            },
            "used_suppression_one_code_oneline_with_reason": {
                "source": dedent_with_lstrip(
                    """
                    # lint-ignore: UsedRule: reason blah.
                    foo = bar
                    """
                ).encode(),
                "rules_in_lint_run": [UsedRule],
                "rules_without_report": [],
                "suppressed_line": 2,
                "expected_unused_suppressions_report_messages": [],
            },
            "used_suppression_one_code_multiline": {
                "source": dedent_with_lstrip(
                    """
                    # lint-ignore: UsedRule: reason
                    # lint: reason continued blah
                    # lint: blah blah.
                    foo = bar
                    """
                ).encode(),
                "rules_in_lint_run": [UsedRule],
                "rules_without_report": [],
                "suppressed_line": 4,
                "expected_unused_suppressions_report_messages": [],
            },
            "used_suppression_many_codes_oneline": {
                "source": dedent_with_lstrip(
                    """
                    # lint-ignore: UsedRule, UsedRule2
                    foo = bar
                    """
                ).encode(),
                "rules_in_lint_run": [UsedRule, UsedRule2],
                "rules_without_report": [],
                "suppressed_line": 2,
                "expected_unused_suppressions_report_messages": [],
            },
            "used_suppression_many_codes_multiline": {
                "source": dedent_with_lstrip(
                    """
                    # lint-ignore: UsedRule, UsedRule2:
                    # lint: reason blah blah.
                    foo = bar
                    """
                ).encode(),
                "rules_in_lint_run": [UsedRule, UsedRule2],
                "rules_without_report": [],
                "suppressed_line": 3,
                "expected_unused_suppressions_report_messages": [],
            },
            # An entirely-unused suppression is reported and removed outright.
            "unused_suppression_one_code_oneline": {
                "source": dedent_with_lstrip(
                    """
                    # lint-ignore: UsedRule: reason blah blah.
                    foo = bar
                    """
                ).encode(),
                "rules_in_lint_run": [UsedRule],
                "rules_without_report": [UsedRule],
                "suppressed_line": 2,
                "expected_unused_suppressions_report_messages": [
                    UNUSED_SUPPRESSION_COMMENT_MESSAGE
                ],
                "expected_replacements": [
                    dedent_with_lstrip(
                        """
                        foo = bar
                        """
                    )
                ],
            },
            "unused_suppression_one_code_multiline": {
                "source": dedent_with_lstrip(
                    """
                    # lint-ignore: UsedRule: reason
                    # lint: reason continued.
                    foo = bar
                    """
                ).encode(),
                "rules_in_lint_run": [UsedRule],
                "rules_without_report": [UsedRule],
                "suppressed_line": 3,
                "expected_unused_suppressions_report_messages": [
                    UNUSED_SUPPRESSION_COMMENT_MESSAGE
                ],
                "expected_replacements": [
                    dedent_with_lstrip(
                        """
                        foo = bar
                        """
                    )
                ],
            },
            # When only some codes are unused, the comment is rewritten to keep
            # the still-needed codes.
            "unused_suppression_many_codes_oneline": {
                "source": dedent_with_lstrip(
                    """
                    # lint-ignore: UsedRule, UsedRule2: reason
                    foo = bar
                    """
                ).encode(),
                "rules_in_lint_run": [UsedRule, UsedRule2],
                "rules_without_report": [UsedRule],
                "suppressed_line": 2,
                "expected_unused_suppressions_report_messages": [
                    UNUSED_SUPPRESSION_CODES_IN_COMMENT_MESSAGE.format(
                        lint_codes="UsedRule"
                    )
                ],
                "expected_replacements": [
                    dedent_with_lstrip(
                        """
                        # lint-ignore: UsedRule2: reason
                        foo = bar
                        """
                    )
                ],
            },
            "unused_suppression_many_codes_multiline": {
                "source": dedent_with_lstrip(
                    """
                    # lint-ignore: UsedRule, UsedRule2: reason
                    # lint: reason continued.
                    foo = bar
                    """
                ).encode(),
                "rules_in_lint_run": [UsedRule, UsedRule2],
                "rules_without_report": [UsedRule],
                "suppressed_line": 3,
                "expected_unused_suppressions_report_messages": [
                    UNUSED_SUPPRESSION_CODES_IN_COMMENT_MESSAGE.format(
                        lint_codes="UsedRule"
                    )
                ],
                "expected_replacements": [
                    dedent_with_lstrip(
                        """
                        # lint-ignore: UsedRule2: reason reason
                        # lint: continued.
                        foo = bar
                        """
                    )
                ],
            },
            "unused_suppression_many_codes_all_unused": {
                "source": dedent_with_lstrip(
                    """
                    # lint-ignore: UsedRule, UsedRule2: reason
                    # lint: reason continued.
                    foo = bar
                    """
                ).encode(),
                "rules_in_lint_run": [UsedRule, UsedRule2],
                "rules_without_report": [UsedRule, UsedRule2],
                "suppressed_line": 3,
                "expected_unused_suppressions_report_messages": [
                    UNUSED_SUPPRESSION_COMMENT_MESSAGE
                ],
                "expected_replacements": [
                    dedent_with_lstrip(
                        """
                        foo = bar
                        """
                    )
                ],
            },
            # Two stacked suppressions: only the unused one is removed.
            "multiple_suppressions": {
                "source": dedent_with_lstrip(
                    """
                    # lint-ignore: UsedRule: first reason
                    # lint: first reason continued.
                    # lint-ignore: UsedRule2: second reason
                    # lint: second reason continued.
                    foo = bar
                    """
                ).encode(),
                "rules_in_lint_run": [UsedRule, UsedRule2],
                "rules_without_report": [UsedRule],
                "suppressed_line": 5,
                "expected_unused_suppressions_report_messages": [
                    UNUSED_SUPPRESSION_COMMENT_MESSAGE
                ],
                "expected_replacements": [
                    dedent_with_lstrip(
                        """
                        # lint-ignore: UsedRule2: second reason
                        # lint: second reason continued.
                        foo = bar
                        """
                    )
                ],
            },
            # Both unused: two reports, each patch removing one comment
            # (patches are independent, each applied to the original source).
            "multiple_unused_suppressions": {
                "source": dedent_with_lstrip(
                    """
                    # lint-ignore: UsedRule: first reason
                    # lint: first reason continued.
                    # lint-ignore: UsedRule2: second reason
                    # lint: second reason continued.
                    foo = bar
                    """
                ).encode(),
                "rules_in_lint_run": [UsedRule, UsedRule2],
                "rules_without_report": [UsedRule, UsedRule2],
                "suppressed_line": 5,
                "expected_unused_suppressions_report_messages": [
                    UNUSED_SUPPRESSION_COMMENT_MESSAGE,
                    UNUSED_SUPPRESSION_COMMENT_MESSAGE,
                ],
                "expected_replacements": [
                    dedent_with_lstrip(
                        """
                        # lint-ignore: UsedRule2: second reason
                        # lint: second reason continued.
                        foo = bar
                        """
                    ),
                    dedent_with_lstrip(
                        """
                        # lint-ignore: UsedRule: first reason
                        # lint: first reason continued.
                        foo = bar
                        """
                    ),
                ],
            },
            # Codes for rules not part of this lint run can't be judged unused.
            "suppressions_with_unlinted_codes_oneline": {
                "source": dedent_with_lstrip(
                    """
                    # lint-ignore: UnusedRule
                    foo = bar
                    """
                ).encode(),
                "rules_in_lint_run": [UsedRule, UsedRule2],
                "rules_without_report": [],
                "suppressed_line": 2,
                "expected_unused_suppressions_report_messages": [],
            },
            "suppressions_with_unlinted_codes_multiline": {
                "source": dedent_with_lstrip(
                    """
                    # lint-ignore: UnusedRule: reason
                    # lint: reason continued
                    foo = bar
                    """
                ).encode(),
                "rules_in_lint_run": [UsedRule, UsedRule2],
                "rules_without_report": [],
                "suppressed_line": 3,
                "expected_unused_suppressions_report_messages": [],
            },
        }
    )
    def test(
        self,
        *,
        source: bytes,
        rules_in_lint_run: Collection[Type[CstLintRule]],
        rules_without_report: Collection[Type[CstLintRule]],
        suppressed_line: int,
        expected_unused_suppressions_report_messages: Collection[str],
        expected_replacements: Optional[List[str]] = None,
    ) -> None:
        """Simulate a lint run, then check the rule's reports and patches."""
        # Fabricate reports only for the rules that actually "fired".
        reports = [
            CstLintRuleReport(
                file_path=FILE_PATH,
                node=cst.EmptyLine(),
                code=rule.__name__,
                message="message",
                line=suppressed_line,
                column=0,
                module=cst.MetadataWrapper(cst.parse_module(source)),
                module_bytes=source,
            )
            for rule in rules_in_lint_run
            if rule not in rules_without_report
        ]
        tokens = _get_tokens(source)
        ignore_info = IgnoreInfo.compute(
            comment_info=CommentInfo.compute(tokens=tokens),
            line_mapping_info=LineMappingInfo.compute(tokens=tokens),
        )
        cst_wrapper = MetadataWrapper(cst.parse_module(source), unsafe_skip_copy=True)
        config = LintConfig(
            rule_config={
                RemoveUnusedSuppressionsRule.__name__: {
                    "ignore_info": ignore_info,
                    "rules": rules_in_lint_run,
                }
            }
        )
        unused_suppressions_context = CstContext(cst_wrapper, source, FILE_PATH, config)
        # Mark the suppressions as used by the fabricated reports.
        for report in reports:
            ignore_info.should_ignore_report(report)
        _visit_cst_rules_with_context(
            cst_wrapper, [RemoveUnusedSuppressionsRule], unused_suppressions_context
        )
        messages = []
        patches = []
        for report in unused_suppressions_context.reports:
            messages.append(report.message)
            patches.append(report.patch)
        self.assertEqual(messages, expected_unused_suppressions_report_messages)
        if expected_replacements is None:
            self.assertEqual(len(patches), 0)
        else:
            self.assertEqual(len(patches), len(expected_replacements))
            # Each patch is applied independently against the original source.
            for idx, patch in enumerate(patches):
                replacement = patch.apply(source.decode())
                self.assertEqual(replacement, expected_replacements[idx])

    def test_compose_new_comment_oneline(self) -> None:
        """_compose_new_comment on a single-line lint-fixme comment."""
        source = dedent_with_lstrip("# lint-fixme: UsedRule, UsedRule2: reason...")
        tokens = _get_tokens(source.encode())
        ignore_info = IgnoreInfo.compute(
            comment_info=CommentInfo.compute(tokens=tokens),
            line_mapping_info=LineMappingInfo.compute(tokens=tokens),
        )
        local_suppression_comments = (
            ignore_info.local_ignore_info.local_suppression_comments_by_line[1]
        )
        self.assertEqual(len(local_suppression_comments), 1)
        local_suppression_comment = local_suppression_comments[0]

        # First code unneeded.
        unneeded_codes = ["UsedRule"]
        new_comment_lines = _compose_new_comment(
            local_suppression_comment, unneeded_codes, 1
        )
        expected_new_lines = ["# lint-fixme: UsedRule2: reason..."]
        self.assertEqual(new_comment_lines, expected_new_lines)

        # Second code unneeded.
        unneeded_codes = ["UsedRule2"]
        new_comment_lines = _compose_new_comment(
            local_suppression_comment, unneeded_codes, 1
        )
        expected_new_lines = ["# lint-fixme: UsedRule: reason..."]
        self.assertEqual(new_comment_lines, expected_new_lines)

        # Both codes unneeded: comment removed entirely.
        unneeded_codes = ["UsedRule", "UsedRule2"]
        new_comment_lines = _compose_new_comment(
            local_suppression_comment, unneeded_codes, 1
        )
        expected_new_lines = []
        self.assertEqual(new_comment_lines, expected_new_lines)

        # Both codes needed.
        unneeded_codes = []
        new_comment_lines = _compose_new_comment(
            local_suppression_comment, unneeded_codes, 1
        )
        # Should be unchanged.
        expected_new_lines = [source]
        self.assertEqual(new_comment_lines, expected_new_lines)

    def test_compose_new_comment_multiline(self) -> None:
        """_compose_new_comment on a two-line lint-fixme comment re-wraps the reason."""
        source = dedent_with_lstrip(
            """
            # lint-fixme: UsedRule, UsedRule2: reason...
            # lint: reason continued
            """
        )
        tokens = _get_tokens(source.encode())
        ignore_info = IgnoreInfo.compute(
            comment_info=CommentInfo.compute(tokens=tokens),
            line_mapping_info=LineMappingInfo.compute(tokens=tokens),
        )
        local_suppression_comments = (
            ignore_info.local_ignore_info.local_suppression_comments_by_line[1]
        )
        self.assertEqual(len(local_suppression_comments), 1)
        local_suppression_comment = local_suppression_comments[0]

        # First code unneeded.
        unneeded_codes = ["UsedRule"]
        new_comment_lines = _compose_new_comment(
            local_suppression_comment, unneeded_codes, 1
        )
        expected_new_lines = [
            "# lint-fixme: UsedRule2: reason... reason",
            "# lint: continued",
        ]
        self.assertEqual(new_comment_lines, expected_new_lines)

        # Second code unneeded.
        unneeded_codes = ["UsedRule2"]
        new_comment_lines = _compose_new_comment(
            local_suppression_comment, unneeded_codes, 1
        )
        expected_new_lines = [
            "# lint-fixme: UsedRule: reason... reason",
            "# lint: continued",
        ]
        self.assertEqual(new_comment_lines, expected_new_lines)

        # Both codes unneeded.
        unneeded_codes = ["UsedRule", "UsedRule2"]
        new_comment_lines = _compose_new_comment(
            local_suppression_comment, unneeded_codes, 1
        )
        expected_new_lines = []
        self.assertEqual(new_comment_lines, expected_new_lines)

        # Both codes needed.
        unneeded_codes = []
        new_comment_lines = _compose_new_comment(
            local_suppression_comment, unneeded_codes, 1
        )
        expected_new_lines = [
            "# lint-fixme: UsedRule, UsedRule2: reason...",
            "# lint: reason continued",
        ]
        self.assertEqual(new_comment_lines, expected_new_lines)
class InsertSuppressionsTest(UnitTest):
    """Tests for insert_suppressions: inserting lint-fixme/lint-ignore comments
    above the correct logical line, with wrapping and truncation."""

    @data_provider(
        {
            "simple_fixme": {
                "before": dedent_with_lstrip(
                    """
                    def fn():
                        ...
                    """
                ),
                "after": dedent_with_lstrip(
                    """
                    # lint-fixme: IgnoredRule: Some message
                    def fn():
                        ...
                    """
                ),
                "comments": [
                    SuppressionComment(
                        kind=SuppressionCommentKind.FIXME,
                        before_line=1,
                        code="IgnoredRule",
                        message="Some message",
                    )
                ],
            },
            "simple_ignore": {
                "before": dedent_with_lstrip(
                    """
                    def fn():
                        ...
                    """
                ),
                "after": dedent_with_lstrip(
                    """
                    # lint-ignore: IgnoredRule: Some message
                    def fn():
                        ...
                    """
                ),
                "comments": [
                    SuppressionComment(
                        kind=SuppressionCommentKind.IGNORE,
                        before_line=1,
                        code="IgnoredRule",
                        message="Some message",
                    )
                ],
            },
            # Without a message, no trailing ": ..." is emitted.
            "no_message": {
                "before": dedent_with_lstrip(
                    """
                    def fn():
                        ...
                    """
                ),
                "after": dedent_with_lstrip(
                    """
                    # lint-fixme: IgnoredRule
                    def fn():
                        ...
                    """
                ),
                "comments": [
                    SuppressionComment(
                        kind=SuppressionCommentKind.FIXME,
                        before_line=1,
                        code="IgnoredRule",
                    )
                ],
            },
            # The inserted comment matches the target line's indentation.
            "indented": {
                "before": dedent_with_lstrip(
                    """
                    def fn():
                        ...
                    """
                ),
                "after": dedent_with_lstrip(
                    """
                    def fn():
                        # lint-fixme: IgnoredRule: Some message
                        ...
                    """
                ),
                "comments": [
                    SuppressionComment(
                        kind=SuppressionCommentKind.FIXME,
                        before_line=2,
                        code="IgnoredRule",
                        message="Some message",
                    )
                ],
            },
            "indented_tabs": {
                "before": dedent_with_lstrip(
                    """
                    def fn():
                    \t...
                    """
                ),
                "after": dedent_with_lstrip(
                    """
                    def fn():
                    \t# lint-fixme: IgnoredRule: Some message
                    \t...
                    """
                ),
                "comments": [
                    SuppressionComment(
                        kind=SuppressionCommentKind.FIXME,
                        before_line=2,
                        code="IgnoredRule",
                        message="Some message",
                    )
                ],
            },
            "multiple_comments": {
                "before": dedent_with_lstrip(
                    """
                    def fn():
                        ...
                    """
                ),
                "after": dedent_with_lstrip(
                    """
                    # lint-fixme: IgnoredRule: Some message
                    # lint-fixme: IgnoredRule1: Another message
                    def fn():
                        # lint-fixme: IgnoredRule2: Yet another
                        ...
                    """
                ),
                "comments": [
                    SuppressionComment(
                        kind=SuppressionCommentKind.FIXME,
                        before_line=1,
                        code="IgnoredRule",
                        message="Some message",
                    ),
                    SuppressionComment(
                        kind=SuppressionCommentKind.FIXME,
                        before_line=1,
                        code="IgnoredRule1",
                        message="Another message",
                    ),
                    SuppressionComment(
                        kind=SuppressionCommentKind.FIXME,
                        before_line=2,
                        code="IgnoredRule2",
                        message="Yet another",
                    ),
                ],
            },
            # A long message is wrapped onto "# lint:" continuation lines at the
            # given code_width.
            "multiline_comment": {
                "before": dedent_with_lstrip(
                    """
                    def fn():
                        ...
                    """
                ),
                "after": dedent_with_lstrip(
                    """
                    def fn():
                        # lint-fixme: IgnoredRule:
                        # lint: Some really long
                        # lint: message that
                        # lint: rambles on and on
                        # lint: that needs to be
                        # lint: wrapped
                        ...
                    """
                ),
                "comments": [
                    SuppressionComment(
                        kind=SuppressionCommentKind.FIXME,
                        before_line=2,
                        code="IgnoredRule",
                        message=(
                            "Some really long message that rambles on and on that "
                            + "needs to be wrapped"
                        ),
                        max_lines=(2**32),
                    )
                ],
                "code_width": 30,
            },
            # Embedded newlines in the message become continuation lines; a blank
            # message line becomes a bare "# lint:".
            "newlines_in_message": {
                "before": dedent_with_lstrip(
                    """
                    def fn():
                        ...
                    """
                ),
                "after": dedent_with_lstrip(
                    """
                    def fn():
                        # lint-fixme: IgnoredRule: This is the first line.
                        # lint: This is a subsequent line followed by a blank line.
                        # lint:
                        # lint: And this is the last line.
                        ...
                    """
                ),
                "comments": [
                    SuppressionComment(
                        kind=SuppressionCommentKind.FIXME,
                        before_line=2,
                        code="IgnoredRule",
                        message=(
                            "This is the first line.\n"
                            + "This is a subsequent line followed by a blank line.\n"
                            + "\n"
                            + "And this is the last line."
                        ),
                        max_lines=(2**32),
                    )
                ],
            },
            "logical_line_continuation": {
                "before": dedent_with_lstrip(
                    """
                    value = "abc"
                    value = \\
                        "abcd" + \\
                        "efgh" + \\
                        "ijkl" + \\
                        "mnop"
                    """
                ),
                "after": dedent_with_lstrip(
                    """
                    value = "abc"
                    # lint-fixme: IgnoredRule: Some message
                    value = \\
                        "abcd" + \\
                        "efgh" + \\
                        "ijkl" + \\
                        "mnop"
                    """
                ),
                "comments": [
                    # Line 4 isn't a logical line, so we expect that the comment will
                    # be put on the first logical line above it.
                    SuppressionComment(
                        kind=SuppressionCommentKind.FIXME,
                        before_line=4,
                        code="IgnoredRule",
                        message="Some message",
                    )
                ],
            },
            "logical_line_multiline_string": {
                "before": dedent_with_lstrip(
                    """
                    value = "abc"
                    value = '''
                    abcd
                    efgh
                    ijkl
                    mnop
                    '''
                    """
                ),
                "after": dedent_with_lstrip(
                    """
                    value = "abc"
                    # lint-fixme: IgnoredRule: Some message
                    value = '''
                    abcd
                    efgh
                    ijkl
                    mnop
                    '''
                    """
                ),
                "comments": [
                    # Line 4 isn't a logical line, so we expect that the comment will
                    # be put on the first logical line above it.
                    SuppressionComment(
                        kind=SuppressionCommentKind.FIXME,
                        before_line=4,
                        code="IgnoredRule",
                        message="Some message",
                    )
                ],
            },
            # max_lines truncates the rendered comment, ending with an ellipsis.
            "max_lines_first_block": {
                "before": dedent_with_lstrip(
                    """
                    def fn():
                        ...
                    """
                ),
                "after": dedent_with_lstrip(
                    """
                    # lint-fixme: IgnoredRule: first block ...
                    def fn():
                        ...
                    """
                ),
                "comments": [
                    SuppressionComment(
                        kind=SuppressionCommentKind.FIXME,
                        before_line=1,
                        code="IgnoredRule",
                        message="first block\n\nsecond block\nthird block",
                        max_lines=1,
                    )
                ],
            },
            "max_lines_between_blocks": {
                "before": dedent_with_lstrip(
                    """
                    def fn():
                        ...
                    """
                ),
                "after": dedent_with_lstrip(
                    """
                    # lint-fixme: IgnoredRule: first block
                    # lint: ...
                    def fn():
                        ...
                    """
                ),
                "comments": [
                    SuppressionComment(
                        kind=SuppressionCommentKind.FIXME,
                        before_line=1,
                        code="IgnoredRule",
                        message="first block\n\nsecond block\nthird block",
                        max_lines=2,
                    )
                ],
            },
            "max_lines_subsequent_blocks": {
                "before": dedent_with_lstrip(
                    """
                    def fn():
                        ...
                    """
                ),
                "after": dedent_with_lstrip(
                    """
                    # lint-fixme: IgnoredRule: first block
                    # lint:
                    # lint: second block ...
                    def fn():
                        ...
                    """
                ),
                "comments": [
                    SuppressionComment(
                        kind=SuppressionCommentKind.FIXME,
                        before_line=1,
                        code="IgnoredRule",
                        message="first block\n\nsecond block\nthird block",
                        max_lines=3,
                    )
                ],
            },
            # In this example the last visible line wouldn't normally need to be
            # truncated, but we don't quite have enough space for the "[...]" ellipsis
            # at the end.
            "max_lines_requires_trimming": {
                "before": dedent_with_lstrip(
                    """
                    def fn():
                        ...
                    """
                ),
                "after": dedent_with_lstrip(
                    """
                    # lint-fixme: IgnoredRule: first line
                    # lint: second line which is too ...
                    def fn():
                        ...
                    """
                ),
                "comments": [
                    SuppressionComment(
                        kind=SuppressionCommentKind.FIXME,
                        before_line=1,
                        code="IgnoredRule",
                        message="first line\nsecond line which is too long\nlast line",
                        max_lines=2,
                    )
                ],
                "code_width": 40,  # the truncated comment is 38 characters long (<40)
            },
        }
    )
    def test_insert_suppressions(
        self,
        *,
        before: str,
        after: str,
        comments: Iterable[SuppressionComment],
        code_width: int = 1000,
        min_comment_width: int = 1,
    ) -> None:
        """Insert `comments` into `before` and expect exactly `after`, with no
        failed insertions."""
        result = insert_suppressions(
            before.encode("utf-8"),
            comments,
            code_width=code_width,
            min_comment_width=min_comment_width,
        )
        updated_source = result.updated_source.decode("utf-8")
        self.assertEqual(updated_source, after)
        self.assertEqual(len(result.failed_insertions), 0)