Code example #1
    def _check_unnecessary_raw_string(self, token: tokenize.TokenInfo) -> None:
        """Reports raw string literals that contain no backslashes at all."""
        prefixes, body = split_prefixes(token.string)

        is_raw = 'r' in prefixes.lower()
        if is_raw and '\\' not in body:
            self.add_violation(
                consistency.RawStringNotNeededViolation(token, text=body),
            )
Code example #2
    def _check_implicit_raw_string(self, token: tokenize.TokenInfo) -> None:
        """Reports non-raw strings that escape characters a raw string would not need to."""
        prefixes, body = split_prefixes(token.string)
        if 'r' in prefixes.lower():
            return  # already a raw string, nothing to report

        if self._implicit_raw_strings.search(_replace_braces(body)):
            self.add_violation(
                consistency.ImplicitRawStringViolation(token, text=token.string),
            )
Code example #3
    def _check_string_modifiers(self, token: tokenize.TokenInfo) -> None:
        """Reports ``u`` prefixes and uppercase string prefix modifiers."""
        prefixes, _ = split_prefixes(token.string)
        lowered = prefixes.lower()

        if 'u' in lowered:
            self.add_violation(
                consistency.UnicodeStringViolation(token, text=token.string),
            )

        # One violation per offending modifier, in prefix order.
        for modifier in prefixes:
            if modifier not in self._bad_string_modifiers:
                continue
            self.add_violation(
                consistency.UppercaseStringModifierViolation(token, text=modifier),
            )
Code example #4
    def _check_wrong_unicode_escape(self, token: tokenize.TokenInfo) -> None:
        """Reports unicode escape sequences used inside bytestrings.

        Only ``b``-prefixed strings can contain a *wrong* unicode escape,
        so other strings are skipped early instead of re-testing the
        modifier on every backslash found (the check was loop-invariant).

        See: http://docs.python.org/reference/lexical_analysis.html
        """
        modifiers, string_body = split_prefixes(token.string)
        if 'b' not in modifiers.lower():
            return  # unicode escapes are only wrong inside bytestrings

        index = 0
        while True:
            index = string_body.find('\\', index)
            if index == -1:
                break

            # A valid string token cannot end in a lone backslash (it would
            # escape the closing quote), so ``index + 1`` is presumably
            # always in range here — relies on the tokenizer's guarantees.
            next_char = string_body[index + 1]
            if next_char in self._unicode_escapes:
                self.add_violation(
                    WrongUnicodeEscapeViolation(token, text=token.string),
                )

            # Whether it was a valid escape or not, backslash followed by
            # another character can always be consumed whole: the second
            # character can never be the start of a new backslash escape.
            index += 2
Code example #5
 def _check_correct_multiline(self, token: tokenize.TokenInfo) -> None:
     """Reports triple-quoted strings that do not actually span multiple lines."""
     _, string_def = split_prefixes(token.string)
     if not has_triple_string_quotes(string_def):
         return

     is_single_line = '\n' not in string_def
     if is_single_line and token not in self._docstrings:
         self.add_violation(
             consistency.WrongMultilineStringViolation(token),
         )