def _check_string_modifiers(self, token: tokenize.TokenInfo) -> None:
    # Flag the redundant unicode prefix: 'u' and 'U' are no-ops in Python 3.
    if token.string.lower().startswith('u'):
        self.add_violation(
            UnicodeStringViolation(token, text=token.string),
        )

    # Flag each prefix character that is a disallowed (uppercase) modifier.
    modifiers, _ = split_prefixes(token)
    for mod in modifiers:
        if mod in self._bad_string_modifiers:
            self.add_violation(
                UppercaseStringModifierViolation(token, text=mod),
            )
def _check_string_modifiers(self, token: tokenize.TokenInfo) -> None:
    # Flag the redundant lowercase unicode prefix.
    if token.string.startswith('u'):
        self.add_violation(
            UnicodeStringViolation(token, text=token.string),
        )

    # Everything before the first quote character is the prefix block;
    # check it for disallowed (uppercase) modifiers.
    modifiers = re.split(r'[\'\"]', token.string)[0]
    if modifiers:
        for mod in self._bad_string_modifiers:
            if mod in modifiers:
                self.add_violation(
                    UppercaseStringModifierViolation(token, text=mod),
                )
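
# A minimal, self-contained sketch of the same prefix check outside the
# checker class. The helper split_prefixes_sketch and the bad_modifiers set
# are assumptions standing in for split_prefixes and self._bad_string_modifiers,
# which are defined elsewhere and not shown here.
import io
import re
import tokenize


def split_prefixes_sketch(token: tokenize.TokenInfo) -> tuple:
    """Split a STRING token into its prefix modifiers and the quoted body."""
    match = re.match(r'([bBrRuUfF]*)(.+)', token.string, re.DOTALL)
    return match.group(1), match.group(2)


source = "first = U'text'\nsecond = B'bytes'\nthird = 'plain'\n"
bad_modifiers = frozenset('BRUF')  # assumed set of disallowed uppercase modifiers

for token in tokenize.generate_tokens(io.StringIO(source).readline):
    if token.type != tokenize.STRING:
        continue
    modifiers, _ = split_prefixes_sketch(token)
    if token.string.lower().startswith('u'):
        print(f'{token.string}: redundant unicode prefix')
    for mod in modifiers:
        if mod in bad_modifiers:
            print(f'{token.string}: uppercase modifier {mod}')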