def testTrailingCommaGood(self):
        with self.assertNoMessages():
            self.checker.process_tokens(_tokenize_str('(a, )\n'))
            self.checker.process_tokens(_tokenize_str('(a,)\n'))

        self.checker.config.no_space_check = []
        with self.assertNoMessages():
            self.checker.process_tokens(_tokenize_str('(a,)\n'))
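Every snippet below drives a checker through pylint's test helper _tokenize_str. It is a thin wrapper around the stdlib tokenizer; a minimal sketch of the helper:

import tokenize
from io import StringIO

def _tokenize_str(code):
    """Tokenize a source string into a list of TokenInfo tuples."""
    return list(tokenize.generate_tokens(StringIO(code).readline))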
Example 2
 def test_issue_2321_should_trigger(self):
     code = "# TODO this should not trigger a fixme"
     with self.assertAddsMessages(
             Message(msg_id="fixme",
                     line=1,
                     args="TODO this should not trigger a fixme")):
         self.checker.process_tokens(_tokenize_str(code))
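The comment text in this example is intentionally ironic: any real comment containing TODO triggers the check, whatever the comment says. The fixme examples exercise a checker that scans COMMENT tokens for warning notes; a hedged sketch of the idea (the pattern and names are illustrative, not pylint's implementation):

import re
import tokenize
from io import StringIO

NOTES = re.compile(r"#\s*(TODO|FIXME|XXX)\b")  # illustrative note pattern

def find_notes(code):
    """Yield (line_number, comment_text) for comments carrying a warning note."""
    for tok in tokenize.generate_tokens(StringIO(code).readline):
        if tok.type == tokenize.COMMENT and NOTES.search(tok.string):
            yield tok.start[0], tok.string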
Example 3
 def test_without_space_fixme(self):
     code = """a = 1
             #FIXME
             """
     with self.assertAddsMessages(
             Message(msg_id="fixme", line=2, args="FIXME")):
         self.checker.process_tokens(_tokenize_str(code))
Example 4
 def test_todo_without_message(self):
     code = """a = 1
             # TODO
             """
     with self.assertAddsMessages(
             Message(msg_id="fixme", line=2, args="TODO")):
         self.checker.process_tokens(_tokenize_str(code))
Example 5
 def testComma(self):
     with self.assertAddsMessages(
             Message('bad-whitespace',
                     line=1,
                     args=('No', 'allowed', 'before', 'comma',
                           '(a , b)\n   ^'))):
         self.checker.process_tokens(_tokenize_str('(a , b)\n'))
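The last element of the bad-whitespace args tuple is the offending line plus a caret marking the column. A hypothetical helper showing how such a marker can be built:

def mark_column(line, col):
    """Return the line with a '^' marker under the given 0-based column."""
    return line + "\n" + " " * col + "^"

assert mark_column("(a , b)", 3) == "(a , b)\n   ^"  # caret under the comma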
Example 6
 def testCheckKeywordParensHandlesUnnecessaryParens(self):
     self.checker._keywords_with_parens = set()
     cases = [
         (Message('superfluous-parens', line=1, args='if'), 'if (foo):', 0),
         (Message('superfluous-parens', line=1,
                  args='if'), 'if ((foo, bar)):', 0),
         (Message('superfluous-parens', line=1,
                  args='if'), 'if (foo(bar)):', 0),
         (Message('superfluous-parens', line=1,
                  args='return'), 'return ((x for x in x))', 0),
         (Message('superfluous-parens', line=1,
                  args='not'), 'not (foo)', 0),
         (Message('superfluous-parens', line=1,
                  args='not'), 'if not (foo):', 1),
         (Message('superfluous-parens', line=1,
                  args='if'), 'if (not (foo)):', 0),
         (Message('superfluous-parens', line=1,
                  args='not'), 'if (not (foo)):', 2),
         (Message('superfluous-parens', line=1,
                  args='for'), 'for (x) in (1, 2, 3):', 0),
         (Message('superfluous-parens', line=1,
                  args='if'), 'if (1) in (1, 2, 3):', 0),
     ]
     for msg, code, offset in cases:
         with self.assertAddsMessages(msg):
             self.checker._check_keyword_parentheses(
                 _tokenize_str(code), offset)
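The third element of each case is the token offset at which _check_keyword_parentheses starts looking: 0 targets the leading keyword, while a positive offset targets a nested one, which is why 'if (not (foo)):' appears twice above. A hedged usage sketch (checker stands for the checker instance used in these tests):

# token indexes: [0] 'if', [1] '(', [2] 'not'; the offset picks the keyword to test
tokens = _tokenize_str('if (not (foo)):')
checker._check_keyword_parentheses(tokens, 0)  # flags the parens around the 'if' condition
checker._check_keyword_parentheses(tokens, 2)  # flags the inner parens after 'not'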
Example 7
 def test_other_present_codetag(self):
     code = """a = 1
             # CODETAG
             # FIXME
             """
     with self.assertAddsMessages(Message(msg_id="fixme", line=2, args="CODETAG")):
         self.checker.process_tokens(_tokenize_str(code))
Example 8
 def test_finds_violations_in_comments(self):
     with self.assertAddsMessages(
             Message("inclusive-comments-violation",
                     line=1,
                     args=('master', 'leader, primary, parent'))):
         self.checker.process_tokens(
             _tokenize_str("# this is a master comment"))
Example 9
 def test_non_ascii_bytes_literal(self):
     code = 'b"测试"'
     self._test_token_message(code, 'non-ascii-bytes-literal')
     for code in ("测试", u"测试", u'abcdef', b'\x80'):
         tokens = testutils._tokenize_str(code)
         with self.assertNoMessages():
             self.checker.process_tokens(tokens)
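The loop cases pass because only a bytes literal whose source text itself contains non-ASCII characters is flagged: an escape such as \x80 is plain ASCII in the source, and str literals are exempt. A hedged predicate consistent with these cases (not pylint's actual code):

def is_non_ascii_bytes_literal(token_string):
    """True for a bytes literal whose source text contains non-ASCII characters."""
    return (token_string.lower().startswith(("b'", 'b"'))
            and any(ord(ch) > 127 for ch in token_string))

assert is_non_ascii_bytes_literal('b"测试"')
assert not is_non_ascii_bytes_literal('"测试"')     # str literal, not bytes
assert not is_non_ascii_bytes_literal(r"b'\x80'")  # the escape is ASCII source text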
Example 12
 def test_check_bad_coment_custom_suggestion_count(self):
     with self.assertAddsMessages(
         Message('wrong-spelling-in-comment', line=1,
                 args=('coment', '# bad coment',
                       '      ^^^^^^',
                       self._get_msg_suggestions('coment', count=2)))):
         self.checker.process_tokens(_tokenize_str("# bad coment"))
 def testValidTypingAnnotationEllipses(self):
     """Make sure ellipses in function typing annotation
     doesn't cause a false positive bad-whitespace message"""
     with self.assertNoMessages():
         self.checker.process_tokens(
             _tokenize_str("def foo(t: Tuple[str, ...] = None):\n")
         )
Example 16
 def test_fixme_with_message(self) -> None:
     code = """a = 1
             # FIXME message
             """
     with self.assertAddsMessages(
         MessageTest(msg_id="fixme", line=2, args="FIXME message")
     ):
         self.checker.process_tokens(_tokenize_str(code))
Example 17
 def testNoSuperfluousParensWalrusOperatorIf(self):
     """Parenthesis change the meaning of assignment in the walrus operator
     and so are not superfluous:"""
     code = "if (odd := is_odd(i))"
     offset = 0
     with self.assertNoMessages():
         self.checker._check_keyword_parentheses(_tokenize_str(code),
                                                 offset)
 def test_absent_codetag(self):
     code = """a = 1
             # FIXME	                # FIXME
             # TODO	                # TODO
             # XXX	                # XXX
             """
     with self.assertNoMessages():
         self.checker.process_tokens(_tokenize_str(code))
 def test_fixme_with_message(self):
     code = """a = 1
             # FIXME message
             """
     with self.assertAddsMessages(
         Message(msg_id="fixme", line=2, args="FIXME message")
     ):
         self.checker.process_tokens(_tokenize_str(code))
Example 20
    def testCheckIfArgsAreNotUnicode(self):
        self.checker._keywords_with_parens = set()
        cases = [('if (foo):', 0), ('assert (1 == 1)', 0)]

        for code, offset in cases:
            self.checker._check_keyword_parentheses(_tokenize_str(code), offset)
            got = self.linter.release_messages()
            assert isinstance(got[-1].args, str)
Example 22
 def test_xxx_without_space(self) -> None:
     code = """a = 1
             #XXX
             """
     with self.assertAddsMessages(
             MessageTest(msg_id="fixme", line=2, args="XXX",
                         col_offset=17)):
         self.checker.process_tokens(_tokenize_str(code))
Example 23
 def test_non_alphanumeric_codetag(self) -> None:
     code = """a = 1
             #???
             """
     with self.assertAddsMessages(
             MessageTest(msg_id="fixme", line=2, args="???",
                         col_offset=17)):
         self.checker.process_tokens(_tokenize_str(code))
Example 24
 def test_check_bad_coment(self):
     suggestions = self.checker.spelling_dict.suggest('coment')[:4]
     with self.assertAddsMessages(
             Message('wrong-spelling-in-comment',
                     line=1,
                     args=('coment', '# bad coment', '      ^^^^^^',
                           "'{0}'".format("' or '".join(suggestions))))):
         self.checker.process_tokens(_tokenize_str("# bad coment"))
Example 25
    def testFuturePrintStatementWithoutParensWarning(self):
        code = """from __future__ import print_function
print('Hello world!')
"""
        tree = astroid.parse(code)
        with self.assertNoMessages():
            self.checker.process_module(tree)
            self.checker.process_tokens(_tokenize_str(code))
Example 27
    def testCheckIfArgsAreNotUnicode(self) -> None:
        cases = [("if (foo):", 0), ("assert (1 == 1)", 0)]

        for code, offset in cases:
            self.checker._check_keyword_parentheses(_tokenize_str(code),
                                                    offset)
            got = self.linter.release_messages()
            assert isinstance(got[-1].args, str)
    def testCheckIfArgsAreNotUnicode(self):
        self.checker._keywords_with_parens = set()
        cases = [(u'if (foo):', 0), (u'assert (1 == 1)', 0)]

        for code, offset in cases:
            self.checker._check_keyword_parentheses(_tokenize_str(code), offset)
            got = self.linter.release_messages()
            assert isinstance(got[-1].args, str)
Example 30
    def testEmptyLines(self):
        self.checker.config.no_space_check = []
        with self.assertAddsMessages(
            Message('trailing-whitespace', line=2)):
            self.checker.process_tokens(_tokenize_str('a = 1\n \nb = 2\n'))

        with self.assertAddsMessages(
            Message('trailing-whitespace', line=2)):
            self.checker.process_tokens(_tokenize_str('a = 1\n\t\nb = 2\n'))

        with self.assertAddsMessages(
            Message('trailing-whitespace', line=2)):
            self.checker.process_tokens(_tokenize_str('a = 1\n\v\nb = 2\n'))

        with self.assertNoMessages():
            self.checker.process_tokens(_tokenize_str('a = 1\n\f\nb = 2\n'))

        self.checker.config.no_space_check = ['empty-line']
        with self.assertNoMessages():
            self.checker.process_tokens(_tokenize_str('a = 1\n \nb = 2\n'))

        with self.assertNoMessages():
            self.checker.process_tokens(_tokenize_str('a = 1\n\t\nb = 2\n'))

        with self.assertNoMessages():
            self.checker.process_tokens(_tokenize_str('a = 1\n\v\nb = 2\n'))
 def testParenthesesGood(self):
     good_cases = [
         '(a)\n',
         '(a * (b + c))\n',
         '(#\n    a)\n',
         ]
     with self.assertNoMessages():
         for code in good_cases:
             self.checker.process_tokens(_tokenize_str(code))
    def test_old_octal_literal(self):
        for octal in ("045", "055", "075", "077", "076543"):
            self._test_token_message(octal, "old-octal-literal")

        # Make sure we are catching only octals.
        for non_octal in ("45", "00", "085", "08", "1"):
            tokens = testutils._tokenize_str(non_octal)
            with self.assertNoMessages():
                self.checker.process_tokens(tokens)
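A predicate consistent with these cases (hedged; not necessarily pylint's own logic): a Python-2-style octal starts with 0, has at least one more digit, uses only octal digits, and is not all zeros:

def looks_like_old_octal(num):
    return (num.startswith("0") and len(num) > 1
            and num.lstrip("0") != ""              # "00" is just zero, not flagged
            and all(c in "01234567" for c in num))

assert all(looks_like_old_octal(n) for n in ("045", "055", "075", "077", "076543"))
assert not any(looks_like_old_octal(n) for n in ("45", "00", "085", "08", "1"))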
 def testOperatorSpacingGood(self):
     good_cases = [
         'a = b\n'
         'a < b\n'
         'a\n< b\n',
         ]
     with self.assertNoMessages():
         for code in good_cases:
             self.checker.process_tokens(_tokenize_str(code))
Example 39
    def test_encoding_token(self):
        """Make sure the encoding token doesn't change the checker's behavior

        _tokenize_str doesn't produce an encoding token, but
        reading a file does
        """
        with self.assertNoMessages():
            encoding_token = tokenize.TokenInfo(tokenize.ENCODING, "utf-8", (0, 0), (0, 0), '')
            tokens = [encoding_token] + _tokenize_str('if (\n        None):\n    pass\n')
            self.checker.process_tokens(tokens)
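The distinction the docstring draws is real in the stdlib: tokenizing bytes emits an ENCODING token first, while generate_tokens over text (what _tokenize_str uses) does not:

import io
import tokenize

byte_tokens = list(tokenize.tokenize(io.BytesIO(b"pass\n").readline))
assert byte_tokens[0].type == tokenize.ENCODING and byte_tokens[0].string == "utf-8"

text_tokens = list(tokenize.generate_tokens(io.StringIO("pass\n").readline))
assert text_tokens[0].type != tokenize.ENCODING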
 def testKeywordSpacingGood(self):
     with self.assertNoMessages():
         self.checker.process_tokens(_tokenize_str('foo(foo=bar)\n'))
         self.checker.process_tokens(_tokenize_str('foo(foo: int = bar)\n'))
         self.checker.process_tokens(_tokenize_str('foo(foo: module.classname = bar)\n'))
         self.checker.process_tokens(_tokenize_str('foo(foo: Dict[int, str] = bar)\n'))
         self.checker.process_tokens(_tokenize_str('foo(foo: \'int\' = bar)\n'))
         self.checker.process_tokens(_tokenize_str('foo(foo: Dict[int, \'str\'] = bar)\n'))
         self.checker.process_tokens(_tokenize_str('lambda x=1: x\n'))
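These good cases follow PEP 8: no spaces around '=' for a plain keyword argument or default value, but exactly one space on each side once a type annotation is present. For example:

def plain(foo=1): ...             # no spaces around '=' without an annotation
def annotated(foo: int = 1): ...  # one space on each side with an annotation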
    def testKeywordSpacingBad(self):
        with self.assertAddsMessages(
                Message('bad-whitespace',
                        line=1,
                        args=('No', 'allowed', 'before',
                              'keyword argument assignment',
                              '(foo =bar)\n     ^'))):
            self.checker.process_tokens(_tokenize_str('(foo =bar)\n'))

        with self.assertAddsMessages(
                Message('bad-whitespace',
                        line=1,
                        args=('No', 'allowed', 'after',
                              'keyword argument assignment',
                              '(foo= bar)\n    ^'))):
            self.checker.process_tokens(_tokenize_str('(foo= bar)\n'))

        with self.assertAddsMessages(
                Message('bad-whitespace',
                        line=1,
                        args=('No', 'allowed', 'around',
                              'keyword argument assignment',
                              '(foo = bar)\n     ^'))):
            self.checker.process_tokens(_tokenize_str('(foo = bar)\n'))

        with self.assertAddsMessages(
                Message('bad-whitespace',
                        line=1,
                        args=('Exactly one', 'required', 'before',
                              'keyword argument assignment',
                              '(foo: int= bar)\n         ^'))):
            self.checker.process_tokens(_tokenize_str('(foo: int= bar)\n'))

        with self.assertAddsMessages(
                Message('bad-whitespace',
                        line=1,
                        args=('Exactly one', 'required', 'after',
                              'keyword argument assignment',
                              '(foo: int =bar)\n          ^'))):
            self.checker.process_tokens(_tokenize_str('(foo: int =bar)\n'))

        with self.assertAddsMessages(
                Message('bad-whitespace',
                        line=1,
                        args=('Exactly one', 'required', 'around',
                              'keyword argument assignment',
                              '(foo: int=bar)\n         ^'))):
            self.checker.process_tokens(_tokenize_str('(foo: int=bar)\n'))

        with self.assertAddsMessages(
                Message('bad-whitespace',
                        line=1,
                        args=('Exactly one', 'required', 'around',
                              'keyword argument assignment',
                              '(foo: List[int]=bar)\n               ^'))):
            self.checker.process_tokens(
                _tokenize_str('(foo: List[int]=bar)\n'))
Example 43
    def testParenthesesBad(self):
        with self.assertAddsMessages(
            Message('bad-whitespace', line=1,
                    args=('No', 'allowed', 'after', 'bracket', '( a)\n^'))):
            self.checker.process_tokens(_tokenize_str('( a)\n'))

        with self.assertAddsMessages(
            Message('bad-whitespace', line=1,
                    args=('No', 'allowed', 'before', 'bracket', '(a )\n   ^'))):
            self.checker.process_tokens(_tokenize_str('(a )\n'))

        with self.assertAddsMessages(
            Message('bad-whitespace', line=1,
                    args=('No', 'allowed', 'before', 'bracket', 'foo (a)\n    ^'))):
            self.checker.process_tokens(_tokenize_str('foo (a)\n'))

        with self.assertAddsMessages(
            Message('bad-whitespace', line=1,
                    args=('No', 'allowed', 'before', 'bracket', '{1: 2} [1]\n       ^'))):
            self.checker.process_tokens(_tokenize_str('{1: 2} [1]\n'))
Example 44
 def testNoSuperfluousParensWalrusOperatorIf(self) -> None:
     """Parenthesis change the meaning of assignment in the walrus operator
     and so are not always superfluous:
     """
     cases = [
         ("if (odd := is_odd(i))\n"),
         ("not (foo := 5)\n"),
     ]
     for code in cases:
         with self.assertNoMessages():
             self.checker.process_tokens(_tokenize_str(code))
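A hedged illustration of why these parentheses carry meaning: ':=' binds more loosely than a comparison, so parentheses change what gets assigned, and some positions require them outright:

if (n := 10) > 5:   # n == 10; the parenthesized walrus result is compared to 5
    pass
if m := 10 > 5:     # m == True; without parens the walrus captures the comparison
    pass
result = (y := 5)   # fine: parenthesized walrus on the right-hand side of '='
# result = y := 5   # SyntaxError: an unparenthesized walrus is not allowed here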
    def testOperatorSpacingBad(self):
        with self.assertAddsMessages(
            Message('bad-whitespace', line=1,
                    args=('Exactly one', 'required', 'before', 'comparison', 'a< b\n ^'))):
            self.checker.process_tokens(_tokenize_str('a< b\n'))

        with self.assertAddsMessages(
            Message('bad-whitespace', line=1,
                    args=('Exactly one', 'required', 'after', 'comparison', 'a <b\n  ^'))):
            self.checker.process_tokens(_tokenize_str('a <b\n'))

        with self.assertAddsMessages(
            Message('bad-whitespace', line=1,
                    args=('Exactly one', 'required', 'around', 'comparison', 'a<b\n ^'))):
            self.checker.process_tokens(_tokenize_str('a<b\n'))

        with self.assertAddsMessages(
            Message('bad-whitespace', line=1,
                    args=('Exactly one', 'required', 'around', 'comparison', 'a<  b\n ^'))):
            self.checker.process_tokens(_tokenize_str('a<  b\n'))
Example 48
 def testSpacesAllowedInsideSlices(self):
     good_cases = [
         '[a:b]\n',
         '[a : b]\n',
         '[a : ]\n',
         '[:a]\n',
         '[:]\n',
         '[::]\n',
         ]
     with self.assertNoMessages():
         for code in good_cases:
             self.checker.process_tokens(_tokenize_str(code))
 def test_check_numerical_id(self):
     with self.assertAddsMessages(
             Message(
                 "numerical-message-id",
                 line=1,
                 args=(
                     "W1234",
                     "some-nice-symbol",
                 ),
             )):
         self.checker.process_tokens(
             _tokenize_str("# pylint: disable=W1234"))
 def test_dont_trigger_on_todoist(self):
     code = """
     # Todoist API: What is this task about?
     # Todoist API: Look up a task's due date
     # Todoist API: Look up a Project/Label/Task ID
     # Todoist API: Fetch all labels
     # Todoist API: "Name" value
     # Todoist API: Get a task's priority
     # Todoist API: Look up the Project ID a Task belongs to
     # Todoist API: Fetch all Projects
     # Todoist API: Fetch all Tasks
     """
     with self.assertNoMessages():
         self.checker.process_tokens(_tokenize_str(code))
Example 55
    def testKeywordSpacingBad(self):
        with self.assertAddsMessages(
            Message('bad-whitespace', line=1,
                    args=('No', 'allowed', 'before', 'keyword argument assignment',
                          '(foo =bar)\n     ^'))):
            self.checker.process_tokens(_tokenize_str('(foo =bar)\n'))

        with self.assertAddsMessages(
            Message('bad-whitespace', line=1,
                    args=('No', 'allowed', 'after', 'keyword argument assignment',
                          '(foo= bar)\n    ^'))):
            self.checker.process_tokens(_tokenize_str('(foo= bar)\n'))

        with self.assertAddsMessages(
            Message('bad-whitespace', line=1,
                    args=('No', 'allowed', 'around', 'keyword argument assignment',
                          '(foo = bar)\n     ^'))):
            self.checker.process_tokens(_tokenize_str('(foo = bar)\n'))

        with self.assertAddsMessages(
            Message('bad-whitespace', line=1,
                    args=('Exactly one', 'required', 'before', 'keyword argument assignment',
                          '(foo: int= bar)\n         ^'))):
            self.checker.process_tokens(_tokenize_str('(foo: int= bar)\n'))

        with self.assertAddsMessages(
            Message('bad-whitespace', line=1,
                    args=('Exactly one', 'required', 'after', 'keyword argument assignment',
                          '(foo: int =bar)\n          ^'))):
            self.checker.process_tokens(_tokenize_str('(foo: int =bar)\n'))

        with self.assertAddsMessages(
            Message('bad-whitespace', line=1,
                    args=('Exactly one', 'required', 'around', 'keyword argument assignment',
                          '(foo: int=bar)\n         ^'))):
            self.checker.process_tokens(_tokenize_str('(foo: int=bar)\n'))

        with self.assertAddsMessages(
            Message('bad-whitespace', line=1,
                    args=('Exactly one', 'required', 'around', 'keyword argument assignment',
                          '(foo: List[int]=bar)\n               ^'))):
            self.checker.process_tokens(_tokenize_str('(foo: List[int]=bar)\n'))
        # Regression test for #1831
        with self.assertNoMessages():
            self.checker.process_tokens(_tokenize_str("(arg: Tuple[\n    int, str] = None):\n"))
 def testCheckKeywordParensHandlesValidCases(self):
     self.checker._keywords_with_parens = set()
     cases = [
         'if foo:',
         'if foo():',
         'if (x and y) or z:',
         'assert foo()',
         'assert ()',
         'if (1, 2) in (3, 4):',
         'if (a or b) in c:',
         'return (x for x in x)',
         'if (x for x in x):',
         'for x in (x for x in x):',
         'not (foo or bar)',
         'not (foo or bar) and baz',
         ]
     with self.assertNoMessages():
         for code in cases:
             self.checker._check_keyword_parentheses(_tokenize_str(code), 0)
Example 57
 def test_skip_python_coding_comments(self):
     self.checker.process_tokens(_tokenize_str(
         '# -*- coding: utf-8 -*-'))
     assert self.linter.release_messages() == []
     self.checker.process_tokens(_tokenize_str(
         '# coding=utf-8'))
     assert self.linter.release_messages() == []
     self.checker.process_tokens(_tokenize_str(
         '# vim: set fileencoding=utf-8 :'))
     assert self.linter.release_messages() == []
     # Now with a shebang first
     self.checker.process_tokens(_tokenize_str(
         '#!/usr/bin/env python\n# -*- coding: utf-8 -*-'))
     assert self.linter.release_messages() == []
     self.checker.process_tokens(_tokenize_str(
         '#!/usr/bin/env python\n# coding=utf-8'))
     assert self.linter.release_messages() == []
     self.checker.process_tokens(_tokenize_str(
         '#!/usr/bin/env python\n# vim: set fileencoding=utf-8 :'))
     assert self.linter.release_messages() == []
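The coding comments skipped above follow PEP 263; the stdlib recognizes them with a regex along these lines (a close paraphrase of tokenize's cookie pattern, not the spelling checker's own code):

import re

# PEP 263: an encoding declaration in a comment on line 1 or 2 of a file.
CODING_RE = re.compile(r"^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)")

assert CODING_RE.match("# -*- coding: utf-8 -*-").group(1) == "utf-8"
assert CODING_RE.match("# coding=utf-8").group(1) == "utf-8"
assert CODING_RE.match("# vim: set fileencoding=utf-8 :").group(1) == "utf-8"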
Example 58
 def test_skip_urls(self):
     self.checker.process_tokens(_tokenize_str('# https://github.com/rfk/pyenchant'))
     assert self.linter.release_messages() == []
Example 59
 def test_skip_shebangs(self):
     self.checker.process_tokens(_tokenize_str('#!/usr/bin/env python'))
     assert self.linter.release_messages() == []