Example #1
    def compute(*, comment_info: CommentInfo,
                line_mapping_info: LineMappingInfo) -> "LocalIgnoreInfo":
        """Collect all local suppression comments and index them by logical line.

        Scans both modern `# lint-fixme` / `# lint-ignore` comments and legacy
        inline `# noqa` comments, returning a LocalIgnoreInfo that maps each
        affected logical line to the suppressions covering it.
        """
        suppressions: List[SuppressionComment] = []
        suppressions_by_line: Dict[
            int, List[SuppressionComment]] = defaultdict(list)

        # New `# lint-fixme` and `# lint-ignore` comments. These are preferred over
        # legacy `# noqa` comments.
        for comment_tok in comment_info.comments_on_own_line:
            lint_match = LINT_IGNORE_REGEXP.fullmatch(comment_tok.string)
            if lint_match is None:
                continue
            physical_line = comment_tok.start[0]
            first_logical = line_mapping_info.physical_to_logical[physical_line]
            last_logical = line_mapping_info.get_next_non_empty_logical_line(
                physical_line)
            assert last_logical is not None, "Failed to get next non-empty logical line"
            rule_codes = _parse_comma_separated_rules(lint_match.group("codes"))
            # TODO: These suppressions can span multiple lines. We need to find
            # every comment token (lines beginning with `# lint:`) associated with
            # this suppression, not just the first.
            suppression = SuppressionComment(rule_codes, [comment_tok])
            suppressions.append(suppression)
            # The suppression covers every logical line from the comment itself
            # through the next non-empty logical line (the suppressed code).
            for logical_line in range(first_logical, last_logical + 1):
                suppressions_by_line[logical_line].append(suppression)

        # Legacy inline `# noqa` comments. This matches flake8's behavior.
        # Process these after `# lint-ignore` comments, because in the case of duplicate
        # or overlapping ignores, we'd prefer to mark the noqa as unused, instead of the
        # more modern `# lint-ignore` comment.
        for comment_tok in comment_info.comments:
            noqa_match = NOQA_INLINE_REGEXP.search(comment_tok.string)
            if noqa_match is None:
                continue
            logical_line = line_mapping_info.physical_to_logical[
                comment_tok.start[0]]
            rule_codes = _parse_comma_separated_rules(noqa_match.group("codes"))
            suppression = SuppressionComment(rule_codes, [comment_tok])
            suppressions.append(suppression)
            suppressions_by_line[logical_line].append(suppression)

        # Hand back a plain dict so missing lines raise instead of silently
        # growing the mapping (no longer a defaultdict).
        return LocalIgnoreInfo(
            suppressions,
            dict(suppressions_by_line),
            line_mapping_info,
        )
Example #2
    def compute(*, comment_info: CommentInfo,
                line_mapping_info: LineMappingInfo) -> "LocalIgnoreInfo":
        """Collect all local suppression comments and index them by line.

        Handles multi-line `# lint-fixme` / `# lint-ignore` comments (any
        continuation lines are gathered via get_all_tokens_and_full_reason)
        as well as legacy inline `# noqa` comments, and returns a
        LocalIgnoreInfo mapping each covered line to its suppressions.
        """
        local_suppression_comments: List[SuppressionComment] = []
        local_suppression_comments_by_line: Dict[
            int, List[SuppressionComment]] = defaultdict(list)

        # New `# lint-fixme` and `# lint-ignore` comments. These are preferred over
        # legacy `# noqa` comments.
        comments_on_own_line_iter = iter(comment_info.comments_on_own_line)
        next_comment_line: Optional[tokenize.TokenInfo] = next(
            comments_on_own_line_iter, None)

        # NOTE: `next_comment_line` is advanced on BOTH paths below — either by
        # get_all_tokens_and_full_reason (which returns the first token past the
        # suppression) or by the explicit next() in the else branch — so the
        # loop always makes progress.
        while next_comment_line is not None:
            match = LINT_IGNORE_REGEXP.fullmatch(next_comment_line.string)
            if match is not None:
                # We are at the *start* of a suppression comment. There may be more physical lines
                # to the comment. We assume any lines starting with `# lint: ` are a continuation.
                start_line = next_comment_line.start[0]
                # The suppressed code is the next non-empty logical line after
                # the comment's first physical line.
                end_line = line_mapping_info.get_next_non_empty_logical_line(
                    start_line)
                assert end_line is not None, "Failed to get next non-empty logical line"

                # Gather every token belonging to this (possibly multi-line)
                # suppression, the reconstructed reason string, and the first
                # comment token that is NOT part of this suppression.
                (
                    tokens,
                    reason,
                    next_comment_line,
                ) = LocalIgnoreInfo.get_all_tokens_and_full_reason(
                    [next_comment_line],
                    comments_on_own_line_iter,
                    end_line,
                    match.group("reason"),
                )

                codes = _parse_comma_separated_rules(match.group("codes"))
                # Construct the SuppressionComment with all the information.
                # match.group(1) is presumably the directive kind
                # ("lint-fixme" vs "lint-ignore") — TODO confirm against
                # LINT_IGNORE_REGEXP's group layout.
                comment = SuppressionComment(codes, tokens, match.group(1),
                                             reason)

                local_suppression_comments.append(comment)

                # Index the comment under every physical line it occupies...
                for tok in tokens:
                    local_suppression_comments_by_line[tok.start[0]].append(
                        comment)

                # Finally we want to map the suppressed line of code to this suppression comment.
                local_suppression_comments_by_line[end_line].append(comment)
            else:
                # Not a suppression start — skip to the next own-line comment.
                next_comment_line = next(comments_on_own_line_iter, None)

        # Legacy inline `# noqa` comments. This matches flake8's behavior.
        # Process these after `# lint-ignore` comments, because in the case of duplicate
        # or overlapping ignores, we'd prefer to mark the noqa as unused, instead of the
        # more modern `# lint-ignore` comment.
        for tok in comment_info.comments:
            match = NOQA_INLINE_REGEXP.search(tok.string)
            if match:
                normalized_line = line_mapping_info.physical_to_logical[
                    tok.start[0]]
                codes = _parse_comma_separated_rules(match.group("codes"))
                comment = SuppressionComment(codes, [tok], kind="noqa")
                local_suppression_comments.append(comment)
                local_suppression_comments_by_line[normalized_line].append(
                    comment)

        return LocalIgnoreInfo(
            local_suppression_comments,
            dict(
                local_suppression_comments_by_line),  # no longer a defaultdict
            line_mapping_info,
        )