Example #1
    def test_ignored_lines(self, *, source: str, ignored_code: str,
                           ignored_lines: Container[int]) -> None:
        tokens = tuple(
            tokenize.tokenize(BytesIO(source.encode("utf-8")).readline))
        ignore_info = IgnoreInfo.compute(
            comment_info=CommentInfo.compute(tokens=tokens),
            line_mapping_info=LineMappingInfo.compute(tokens=tokens),
        )
        lines = range(1, tokens[-1].end[0] + 1)
        actual_ignored_lines = []
        for line in lines:
            ignored = ignore_info.should_ignore_report(
                CstLintRuleReport(
                    file_path=Path("fake/path.py"),
                    node=cst.EmptyLine(),
                    code=ignored_code,
                    message="message",
                    line=line,
                    column=0,
                    module=cst.MetadataWrapper(cst.parse_module(source)),
                    module_bytes=source.encode("utf-8"),
                ))
            if ignored:
                actual_ignored_lines.append(line)
        # pyre-fixme[6]: Expected `Iterable[Variable[_T]]` for 1st param but got
        #  `Container[int]`.
        self.assertEqual(actual_ignored_lines, list(ignored_lines))
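
A small hedged helper, not part of fixit, that collapses the tokenize-then-compute boilerplate repeated across these examples; it only re-uses calls already shown here (the `use_noqa` keyword appears in later examples) and assumes the same imports as the tests.

def build_ignore_info(source: str, *, use_noqa: bool = False) -> IgnoreInfo:
    # Tokenize the source and compute suppression info exactly as the tests do.
    tokens = tuple(
        tokenize.tokenize(BytesIO(source.encode("utf-8")).readline))
    return IgnoreInfo.compute(
        comment_info=CommentInfo.compute(tokens=tokens),
        line_mapping_info=LineMappingInfo.compute(tokens=tokens),
        use_noqa=use_noqa,
    )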
Example #2
    def test_multiline_suppression(self) -> None:
        source = """
        # lint-ignore: SomeCode: some reason
        # lint: and some reason continued
        # lint: onto multiple lines.
        x = "Some ignored violation"
        """
        tokens = tuple(
            tokenize.tokenize(BytesIO(source.encode("utf-8")).readline))
        ignore_info = IgnoreInfo.compute(
            comment_info=CommentInfo.compute(tokens=tokens),
            line_mapping_info=LineMappingInfo.compute(tokens=tokens),
        )

        self.assertEqual(
            len(ignore_info.local_ignore_info.local_suppression_comments), 1)
        local_supp_comment = next(
            lsc for lsc in
            ignore_info.local_ignore_info.local_suppression_comments)
        self.assertEqual(
            local_supp_comment.reason,
            "some reason and some reason continued onto multiple lines.",
        )
        # Verify that all local suppression comment lines map to the same
        # SuppressionComment instance.
        for (
                lines,
                supp_comments,
        ) in ignore_info.local_ignore_info.local_suppression_comments_by_line.items(
        ):
            self.assertEqual(len(supp_comments), 1)
            supp_comment = supp_comments[0]
            self.assertIs(supp_comment, local_supp_comment)
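
        # A hedged follow-up check, not in the original test: it assumes the
        # comment exposes its physical comment tokens as `.tokens`, as used
        # elsewhere in these examples. All three `# lint...` lines should be
        # captured on the single suppression comment.
        self.assertEqual(len(local_supp_comment.tokens), 3)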
Example #3
def insert_suppressions(
    source: bytes,
    comments: Iterable[SuppressionComment],
    *,
    code_width: int = DEFAULT_CODE_WIDTH,
    min_comment_width: int = DEFAULT_MIN_COMMENT_WIDTH,
) -> InsertSuppressionsResult:
    """
    Given a file's `source` as bytes, forms a new sequence of lines with
    `comments` inserted before the lines they target.
    """
    encoding = tokenize.detect_encoding(BytesIO(source).readline)[0]
    tokens = tuple(tokenize.tokenize(BytesIO(source).readline))
    indentations = _get_indentations(tokens)
    physical_to_logical = LineMappingInfo.compute(
        tokens=tokens).physical_to_logical
    comments_queue = deque(sorted(comments))  # sort by line number
    updated_lines = []

    for line_number, line_bytes in enumerate(BytesIO(source).readlines(),
                                             start=1):
        while comments_queue:
            target_line = physical_to_logical[comments_queue[0].before_line]
            if target_line == line_number:
                indent = indentations[line_number]
                width = max(code_width - len(indent), min_comment_width)
                for line in comments_queue.popleft().to_lines(width):
                    updated_lines.append(f"{indent}{line}\n".encode(encoding))
            else:
                break
        updated_lines.append(line_bytes)

    return InsertSuppressionsResult(updated_source=b"".join(updated_lines),
                                    failed_insertions=tuple(comments_queue))
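
A minimal usage sketch, not part of fixit: `FakeComment` is a hypothetical stand-in that supplies only what `insert_suppressions` touches on each comment (sort order, `before_line`, and `to_lines`), so nothing about the real `SuppressionComment` constructor has to be assumed.

from dataclasses import dataclass, field
from typing import List


@dataclass(order=True)
class FakeComment:
    """Hypothetical stand-in for a suppression comment."""

    before_line: int
    lines: List[str] = field(compare=False, default_factory=list)

    def to_lines(self, width: int) -> List[str]:
        # Width-based wrapping is ignored in this sketch.
        return self.lines


source = b"def f():\n    return 1\n"
result = insert_suppressions(
    source, [FakeComment(2, ["# lint-fixme: SomeRule: reason"])]
)
print(result.updated_source.decode())
# Expected output (assuming `_get_indentations` reports the four-space indent
# of line 2):
#   def f():
#       # lint-fixme: SomeRule: reason
#       return 1
print(result.failed_insertions)  # ()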
Example #4
    def test(
        self,
        *,
        source: bytes,
        rules_in_lint_run: Collection[Type[CstLintRule]],
        rules_without_report: Collection[Type[CstLintRule]],
        suppressed_line: int,
        expected_unused_suppressions_report_messages: Collection[str],
        expected_replacements: Optional[List[str]] = None,
    ) -> None:
        reports = [
            CstLintRuleReport(
                file_path=FILE_PATH,
                node=cst.EmptyLine(),
                code=rule.__name__,
                message="message",
                line=suppressed_line,
                column=0,
                module=cst.MetadataWrapper(cst.parse_module(source)),
                module_bytes=source,
            ) for rule in rules_in_lint_run if rule not in rules_without_report
        ]
        tokens = _get_tokens(source)
        ignore_info = IgnoreInfo.compute(
            comment_info=CommentInfo.compute(tokens=tokens),
            line_mapping_info=LineMappingInfo.compute(tokens=tokens),
        )
        cst_wrapper = MetadataWrapper(cst.parse_module(source),
                                      unsafe_skip_copy=True)
        config = LintConfig(
            rule_config={
                RemoveUnusedSuppressionsRule.__name__: {
                    "ignore_info": ignore_info,
                    "rules": rules_in_lint_run,
                }
            })
        unused_suppressions_context = CstContext(cst_wrapper, source,
                                                 FILE_PATH, config)
        for report in reports:
            ignore_info.should_ignore_report(report)
        _visit_cst_rules_with_context(cst_wrapper,
                                      [RemoveUnusedSuppressionsRule],
                                      unused_suppressions_context)

        messages = []
        patches = []
        for report in unused_suppressions_context.reports:
            messages.append(report.message)
            patches.append(report.patch)

        self.assertEqual(messages,
                         expected_unused_suppressions_report_messages)
        if expected_replacements is None:
            self.assertEqual(len(patches), 0)
        else:
            self.assertEqual(len(patches), len(expected_replacements))

            for idx, patch in enumerate(patches):
                replacement = patch.apply(source.decode())
                self.assertEqual(replacement, expected_replacements[idx])
Example #5
    def test_compose_new_comment_multiline(self) -> None:
        source = dedent_with_lstrip("""
            # lint-fixme: UsedRule, UsedRule2: reason...
            # lint: reason continued
            """)
        tokens = _get_tokens(source.encode())
        ignore_info = IgnoreInfo.compute(
            comment_info=CommentInfo.compute(tokens=tokens),
            line_mapping_info=LineMappingInfo.compute(tokens=tokens),
            use_noqa=False,
        )
        local_suppression_comments = (ignore_info.local_ignore_info.
                                      local_suppression_comments_by_line[1])
        self.assertEqual(len(local_suppression_comments), 1)
        local_suppression_comment = local_suppression_comments[0]

        # First code unneeded.
        unneeded_codes = ["UsedRule"]
        new_comment_lines = _compose_new_comment(local_suppression_comment,
                                                 unneeded_codes, 1)
        expected_new_lines = [
            "# lint-fixme: UsedRule2: reason... reason",
            "# lint: continued",
        ]
        self.assertEqual(new_comment_lines, expected_new_lines)

        # Second code unneeded.
        unneeded_codes = ["UsedRule2"]
        new_comment_lines = _compose_new_comment(local_suppression_comment,
                                                 unneeded_codes, 1)
        expected_new_lines = [
            "# lint-fixme: UsedRule: reason... reason",
            "# lint: continued",
        ]
        self.assertEqual(new_comment_lines, expected_new_lines)

        # Both codes unneeded.
        unneeded_codes = ["UsedRule", "UsedRule2"]
        new_comment_lines = _compose_new_comment(local_suppression_comment,
                                                 unneeded_codes, 1)
        expected_new_lines = []
        self.assertEqual(new_comment_lines, expected_new_lines)

        # Both codes needed.
        unneeded_codes = []
        new_comment_lines = _compose_new_comment(local_suppression_comment,
                                                 unneeded_codes, 1)
        expected_new_lines = [
            "# lint-fixme: UsedRule, UsedRule2: reason...",
            "# lint: reason continued",
        ]
        self.assertEqual(new_comment_lines, expected_new_lines)
Example #6
    def test_line_mapping(
        self,
        *,
        code: str,
        physical_to_logical: Mapping[int, int],
        next_non_empty_logical_line: Mapping[int, int],
    ) -> None:
        tokens = tokenize.tokenize(BytesIO(code.encode("utf-8")).readline)
        result = LineMappingInfo.compute(tokens=tokens)
        self.assertEqual(dict(result.physical_to_logical),
                         dict(physical_to_logical))
        for input, expected in next_non_empty_logical_line.items():
            self.assertEqual(result.get_next_non_empty_logical_line(input),
                             expected)
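
A small hedged driver for the same API outside the test harness, assuming the imports used by the test above; it prints rather than asserts, since the exact mapping values for this input are not established by these examples.

code = "foo(\n    1,\n    2,\n)\nbar()\n"  # hypothetical input
tokens = tokenize.tokenize(BytesIO(code.encode("utf-8")).readline)
info = LineMappingInfo.compute(tokens=tokens)
# Physical lines of the continued call and of `bar()` map onto logical lines.
print(dict(info.physical_to_logical))
print(info.get_next_non_empty_logical_line(1))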
Example #7
    def compute(*, comment_info: CommentInfo,
                line_mapping_info: LineMappingInfo) -> "LocalIgnoreInfo":
        local_suppression_comments: List[SuppressionComment] = []
        local_suppression_comments_by_line: Dict[
            int, List[SuppressionComment]] = defaultdict(list)

        # New `# lint-fixme` and `# lint-ignore` comments. These are preferred over
        # legacy `# noqa` comments.
        for tok in comment_info.comments_on_own_line:
            match = LINT_IGNORE_REGEXP.fullmatch(tok.string)
            if match:
                start_line = line_mapping_info.physical_to_logical[
                    tok.start[0]]
                end_line = line_mapping_info.get_next_non_empty_logical_line(
                    tok.start[0])
                assert end_line is not None, "Failed to get next non-empty logical line"
                codes = _parse_comma_separated_rules(match.group("codes"))
                # TODO: These suppressions can span multiple lines. We need to find
                # every comment token (lines beginning with `# lint:`) associated with
                # this suppression, not just the first.
                comment = SuppressionComment(codes, [tok])
                local_suppression_comments.append(comment)
                for logical_line in range(start_line, end_line + 1):
                    local_suppression_comments_by_line[logical_line].append(
                        comment)

        # Legacy inline `# noqa` comments. This matches flake8's behavior.
        # Process these after `# lint-ignore` comments, because in the case of duplicate
        # or overlapping ignores, we'd prefer to mark the noqa as unused, instead of the
        # more modern `# lint-ignore` comment.
        for tok in comment_info.comments:
            match = NOQA_INLINE_REGEXP.search(tok.string)
            if match:
                normalized_line = line_mapping_info.physical_to_logical[
                    tok.start[0]]
                codes = _parse_comma_separated_rules(match.group("codes"))
                comment = SuppressionComment(codes, [tok])
                local_suppression_comments.append(comment)
                local_suppression_comments_by_line[normalized_line].append(
                    comment)

        return LocalIgnoreInfo(
            local_suppression_comments,
            dict(
                local_suppression_comments_by_line),  # no longer a defaultdict
            line_mapping_info,
        )
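
A hedged standalone driver for `compute`, assuming the same imports as the surrounding examples and that it is callable as `LocalIgnoreInfo.compute(...)`; the input source and the printed shape are illustrative only.

source = b"# lint-ignore: IG01: reason\nfn1()\nfn2()  # noqa: IG02\n"
tokens = tuple(tokenize.tokenize(BytesIO(source).readline))
local_info = LocalIgnoreInfo.compute(
    comment_info=CommentInfo.compute(tokens=tokens),
    line_mapping_info=LineMappingInfo.compute(tokens=tokens),
)
# One SuppressionComment per matched comment; the by-line mapping records
# which logical lines each suppression applies to.
print(len(local_info.local_suppression_comments))
for line, comments in sorted(
        local_info.local_suppression_comments_by_line.items()):
    print(line, len(comments))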
Example #8
    def test_unused_comments(
        self,
        *,
        source: str,
        reports_on_lines: Iterable[Tuple[int, str]],
        unused_comments: Iterable[int],
    ) -> None:
        """
        Verify that we can correctly track which lint comments were used and which were
        unused.

        TODO: We don't track usage of global ignore comments, so we can't know if
        they're unused.
        """
        tokens = tuple(
            tokenize.tokenize(BytesIO(source.encode("utf-8")).readline))
        ignore_info = IgnoreInfo.compute(
            comment_info=CommentInfo.compute(tokens=tokens),
            line_mapping_info=LineMappingInfo.compute(tokens=tokens),
            use_noqa=True,
        )

        for line, code in reports_on_lines:
            ignore_info.should_ignore_report(
                CstLintRuleReport(
                    file_path=Path("fake/path.py"),
                    node=cst.EmptyLine(),
                    code=code,
                    message="message",
                    line=line,
                    column=0,
                    module=cst.MetadataWrapper(cst.parse_module(source)),
                    module_bytes=source.encode("utf-8"),
                ))

        self.assertEqual(
            sorted([
                min(tok.start[0] for tok in c.tokens)
                for c in ignore_info.suppression_comments if not c.used_by
            ]),
            sorted(unused_comments),
        )
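
A hedged example of a fixture this test could consume; the values are assumptions inferred from the suppression logic shown in these examples, not data taken from fixit's test suite.

EXAMPLE_UNUSED_CASE = {
    # Hypothetical source: two lint-ignore comments, only the first of which
    # will match a report.
    "source": (
        "# lint-ignore: IG01: first reason\n"
        "fn1()\n"
        "# lint-ignore: IG02: second reason\n"
        "fn2()\n"
    ),
    "reports_on_lines": [(2, "IG01")],
    # Assumption: the IG01 comment is used by the report on line 2, while the
    # IG02 comment starting on line 3 never matches a report and is unused.
    "unused_comments": [3],
}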
Example #9
def lint_file(
    file_path: Path,
    source: bytes,
    *,
    use_ignore_byte_markers: bool = True,
    use_ignore_comments: bool = True,
    config: Optional[LintConfig] = None,
    rules: LintRuleCollectionT,
    cst_wrapper: Optional[MetadataWrapper] = None,
    find_unused_suppressions: bool = False,
) -> Collection[BaseLintRuleReport]:
    """
    May raise a SyntaxError, which should be handled by the
    caller.
    """
    # Get settings from the nearest `.fixit.config.yaml` file if necessary.
    config: LintConfig = config if config is not None else get_lint_config()

    if use_ignore_byte_markers and any(
        pattern.encode() in source for pattern in config.block_list_patterns
    ):
        return []

    tokens = None
    if use_ignore_comments:
        # Don't compute tokens unless we have to; it slows down
        # `fixit.cli.run_rules`.
        #
        # `tokenize` is actually much more expensive than generating the whole AST,
        # since AST parsing is heavily optimized C, and tokenize is pure python.
        tokens = _get_tokens(source)
        ignore_info = IgnoreInfo.compute(
            comment_info=CommentInfo.compute(tokens=tokens),
            line_mapping_info=LineMappingInfo.compute(tokens=tokens),
        )
    else:
        ignore_info = None

    # Don't waste time evaluating rules that are globally ignored.
    evaluated_rules = [
        r for r in rules if ignore_info is None or ignore_info.should_evaluate_rule(r)
    ]
    # Categorize lint rules.
    cst_rules: List[Type[CstLintRule]] = []
    pseudo_rules: List[Type[PseudoLintRule]] = []
    for r in evaluated_rules:
        if issubclass(r, CstLintRule):
            cst_rules.append(cast(Type[CstLintRule], r))
        elif issubclass(r, PseudoLintRule):
            pseudo_rules.append(cast(Type[PseudoLintRule], r))

    # `self.context.report()` accumulates reports into the context object, we'll copy
    # those into our local `reports` list.
    ast_tree = None
    reports = []

    if cst_wrapper is None:
        cst_wrapper = MetadataWrapper(cst.parse_module(source), unsafe_skip_copy=True)
    if cst_rules:
        cst_context = CstContext(cst_wrapper, source, file_path, config)
        _visit_cst_rules_with_context(cst_wrapper, cst_rules, cst_context)
        reports.extend(cst_context.reports)

    if pseudo_rules:
        pseudo_context = PseudoContext(file_path, source, tokens, ast_tree)
        for pr_cls in pseudo_rules:
            reports.extend(pr_cls(pseudo_context).lint_file())

    if ignore_info is not None:
        # Filter the accumulated errors that should be suppressed and report
        # unused suppressions.
        reports = [r for r in reports if not ignore_info.should_ignore_report(r)]
        if find_unused_suppressions and cst_rules:
            # ignore_info.should_ignore_report must already have been called for
            # every report above, so that ignore_info is up to date before
            # RemoveUnusedSuppressionsRule runs. A fresh context is constructed so
            # its reports can be collected separately and appended to `reports`.
            config.rule_config[RemoveUnusedSuppressionsRule.__name__] = {
                "ignore_info": ignore_info,
                "rules": cst_rules,
            }
            unused_suppressions_context = CstContext(
                cst_wrapper, source, file_path, config
            )
            _visit_cst_rules_with_context(
                cst_wrapper, [RemoveUnusedSuppressionsRule], unused_suppressions_context
            )
            reports.extend(unused_suppressions_context.reports)

    return reports
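
A hedged end-to-end sketch of calling `lint_file`. `NoEvalRule` is a hypothetical rule written against the CstLintRule authoring pattern (class-level `MESSAGE`, `visit_*` methods, `self.report`); those details, and the behavior note in the final comment, are assumptions. With no `config` passed, settings fall back to the nearest `.fixit.config.yaml`.

from pathlib import Path

import libcst as cst


class NoEvalRule(CstLintRule):
    """Hypothetical rule, defined only for this sketch."""

    MESSAGE = "Avoid eval()."

    def visit_Call(self, node: cst.Call) -> None:
        if isinstance(node.func, cst.Name) and node.func.value == "eval":
            self.report(node)


source = (
    b"# lint-ignore: NoEvalRule: known issue\n"
    b"x = eval('1')\n"
    b"y = eval('2')\n"
)
reports = lint_file(
    Path("example.py"),
    source,
    rules={NoEvalRule},
    use_ignore_comments=True,
    find_unused_suppressions=True,
)
# Expected (an assumption based on the logic above): the report on the line
# after the `# lint-ignore` comment is suppressed, leaving one for the last line.
for report in reports:
    print(report.line, report.code, report.message)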
Example #10
    def compute(*, comment_info: CommentInfo,
                line_mapping_info: LineMappingInfo) -> "LocalIgnoreInfo":
        local_suppression_comments: List[SuppressionComment] = []
        local_suppression_comments_by_line: Dict[
            int, List[SuppressionComment]] = defaultdict(list)

        # New `# lint-fixme` and `# lint-ignore` comments. These are preferred over
        # legacy `# noqa` comments.
        comments_on_own_line_iter = iter(comment_info.comments_on_own_line)
        next_comment_line: Optional[tokenize.TokenInfo] = next(
            comments_on_own_line_iter, None)

        while next_comment_line is not None:
            match = LINT_IGNORE_REGEXP.fullmatch(next_comment_line.string)
            if match is not None:
                # We are at the *start* of a suppression comment, which may span
                # additional physical lines. We assume any line starting with
                # `# lint:` is a continuation.
                start_line = next_comment_line.start[0]
                end_line = line_mapping_info.get_next_non_empty_logical_line(
                    start_line)
                assert end_line is not None, "Failed to get next non-empty logical line"

                (
                    tokens,
                    reason,
                    next_comment_line,
                ) = LocalIgnoreInfo.get_all_tokens_and_full_reason(
                    [next_comment_line],
                    comments_on_own_line_iter,
                    end_line,
                    match.group("reason"),
                )

                codes = _parse_comma_separated_rules(match.group("codes"))
                # Construct the SuppressionComment with all the information.
                comment = SuppressionComment(codes, tokens, match.group(1),
                                             reason)

                local_suppression_comments.append(comment)

                for tok in tokens:
                    local_suppression_comments_by_line[tok.start[0]].append(
                        comment)

                # Finally we want to map the suppressed line of code to this suppression comment.
                local_suppression_comments_by_line[end_line].append(comment)
            else:
                next_comment_line = next(comments_on_own_line_iter, None)

        # Legacy inline `# noqa` comments. This matches flake8's behavior.
        # Process these after `# lint-ignore` comments, because in the case of duplicate
        # or overlapping ignores, we'd prefer to mark the noqa as unused, instead of the
        # more modern `# lint-ignore` comment.
        for tok in comment_info.comments:
            match = NOQA_INLINE_REGEXP.search(tok.string)
            if match:
                normalized_line = line_mapping_info.physical_to_logical[
                    tok.start[0]]
                codes = _parse_comma_separated_rules(match.group("codes"))
                comment = SuppressionComment(codes, [tok], kind="noqa")
                local_suppression_comments.append(comment)
                local_suppression_comments_by_line[normalized_line].append(
                    comment)

        return LocalIgnoreInfo(
            local_suppression_comments,
            dict(
                local_suppression_comments_by_line),  # no longer a defaultdict
            line_mapping_info,
        )