def testParenthesesBad(self):
    with self.assertAddsMessages(
        Message('bad-whitespace', line=1,
                args=('No', 'allowed', 'after', 'bracket', '( a)\n^'))):
        self.checker.process_tokens(tokenize_str('( a)\n'))

    with self.assertAddsMessages(
        Message('bad-whitespace', line=1,
                args=('No', 'allowed', 'before', 'bracket', '(a )\n   ^'))):
        self.checker.process_tokens(tokenize_str('(a )\n'))

    with self.assertAddsMessages(
        Message('bad-whitespace', line=1,
                args=('No', 'allowed', 'before', 'bracket', 'foo (a)\n    ^'))):
        self.checker.process_tokens(tokenize_str('foo (a)\n'))

    with self.assertAddsMessages(
        Message('bad-whitespace', line=1,
                args=('No', 'allowed', 'before', 'bracket', '{1: 2} [1]\n       ^'))):
        self.checker.process_tokens(tokenize_str('{1: 2} [1]\n'))

def testOperatorSpacingBad(self):
    with self.assertAddsMessages(
        Message('bad-whitespace', line=1,
                args=('Exactly one', 'required', 'before', 'comparison', 'a< b\n ^'))):
        self.checker.process_tokens(tokenize_str('a< b\n'))

    with self.assertAddsMessages(
        Message('bad-whitespace', line=1,
                args=('Exactly one', 'required', 'after', 'comparison', 'a <b\n  ^'))):
        self.checker.process_tokens(tokenize_str('a <b\n'))

    with self.assertAddsMessages(
        Message('bad-whitespace', line=1,
                args=('Exactly one', 'required', 'around', 'comparison', 'a<b\n ^'))):
        self.checker.process_tokens(tokenize_str('a<b\n'))

    with self.assertAddsMessages(
        Message('bad-whitespace', line=1,
                args=('Exactly one', 'required', 'around', 'comparison', 'a<  b\n ^'))):
        self.checker.process_tokens(tokenize_str('a<  b\n'))

def testEmptyLines(self):
    self.checker.config.no_space_check = []
    with self.assertAddsMessages(
        Message('trailing-whitespace', line=2)):
        self.checker.process_tokens(tokenize_str('a = 1\n \nb = 2\n'))

    self.checker.config.no_space_check = ['empty-line']
    with self.assertNoMessages():
        self.checker.process_tokens(tokenize_str('a = 1\n \nb = 2\n'))

def testTrailingCommaGood(self):
    with self.assertNoMessages():
        self.checker.process_tokens(tokenize_str('(a, )\n'))
        self.checker.process_tokens(tokenize_str('(a,)\n'))

    self.checker.config.no_space_check = []
    with self.assertNoMessages():
        self.checker.process_tokens(tokenize_str('(a,)\n'))

def test_double_tri_quote_string_literal_cfg_double(self):
    test_str = '''x = """test"""'''
    with self.assertNoMessages():
        self.checker.process_tokens(tokenize_str(test_str))
        self.checker.leave_module(None)

def test_check_bad_coment(self):
    with self.assertAddsMessages(
        Message('wrong-spelling-in-comment', line=1,
                args=('coment', '# bad coment',
                      '      ^^^^^^',
                      "comet' or 'comment' or 'moment' or 'foment"))):
        self.checker.process_tokens(tokenize_str("# bad coment"))

def test_single_tri_quote_string_literal_cfg_single(self):
    test_str = """x = '''test'''"""
    with self.assertNoMessages():
        self.checker.process_tokens(tokenize_str(test_str))
        self.checker.leave_module(None)

def testComma(self):
    with self.assertAddsMessages(
        Message('bad-whitespace', line=1,
                args=('No', 'allowed', 'before', 'comma', '(a , b)\n   ^'))):
        self.checker.process_tokens(tokenize_str('(a , b)\n'))

def test_check_bad_coment(self):
    suggestions = self.checker.spelling_dict.suggest('coment')[:4]
    with self.assertAddsMessages(
        Message('wrong-spelling-in-comment', line=1,
                args=('coment', '# bad coment',
                      '      ^^^^^^',
                      "'{0}'".format("' or '".join(suggestions))))):
        self.checker.process_tokens(tokenize_str("# bad coment"))

def testFuturePrintStatementWithoutParensWarning(self):
    code = """from __future__ import print_function
print('Hello world!')
"""
    tree = astroid.parse(code)
    with self.assertNoMessages():
        self.checker.process_module(tree)
        self.checker.process_tokens(tokenize_str(code))

def test_old_octal_literal(self):
    for octal in ("045", "055", "075", "077", "076543"):
        self._test_token_message(octal, "old-octal-literal")

    # Make sure we are catching only octals.
    for non_octal in ("45", "00", "085", "08", "1"):
        tokens = testutils.tokenize_str(non_octal)
        with self.assertNoMessages():
            self.checker.process_tokens(tokens)

def testKeywordSpacingBad(self):
    with self.assertAddsMessages(
        Message('bad-whitespace', line=1,
                args=('No', 'allowed', 'before', 'keyword argument assignment',
                      '(foo =bar)\n     ^'))):
        self.checker.process_tokens(tokenize_str('(foo =bar)\n'))

    with self.assertAddsMessages(
        Message('bad-whitespace', line=1,
                args=('No', 'allowed', 'after', 'keyword argument assignment',
                      '(foo= bar)\n    ^'))):
        self.checker.process_tokens(tokenize_str('(foo= bar)\n'))

    with self.assertAddsMessages(
        Message('bad-whitespace', line=1,
                args=('No', 'allowed', 'around', 'keyword argument assignment',
                      '(foo = bar)\n     ^'))):
        self.checker.process_tokens(tokenize_str('(foo = bar)\n'))

def test_double_quote_string_literal_cfg_single_with_escaping(self):
    test_str = '''x = "this is a \\"test\\" string"'''
    msg = Message(msg_id='invalid-string-quote', line=1, args=(Q_DOUB, Q_SING))
    with self.assertAddsMessages(msg):
        self.checker.process_tokens(tokenize_str(test_str))

def test_single_quote_string_literal_cfg_double_with_escaping(self):
    test_str = """x = 'this is a \\'test\\' string'"""
    msg = Message(msg_id='invalid-string-quote', line=1, args=(Q_SING, Q_DOUB))
    with self.assertAddsMessages(msg):
        self.checker.process_tokens(tokenize_str(test_str))

def testOperatorSpacingGood(self):
    good_cases = [
        'a = b\n'
        'a < b\n'
        'a\n< b\n',
    ]
    with self.assertNoMessages():
        for code in good_cases:
            self.checker.process_tokens(tokenize_str(code))

def test_mixed_quote_string_literal_cfg_double(self):
    test_str = '''x = "test" + 'test' + "test"'''
    msg = Message(msg_id='invalid-string-quote', line=1, args=(Q_SING, Q_DOUB))
    with self.assertAddsMessages(msg):
        self.checker.process_tokens(tokenize_str(test_str))

def testParenthesesGood(self):
    good_cases = [
        '(a)\n',
        '(a * (b + c))\n',
        '(#\n a)\n',
    ]
    with self.assertNoMessages():
        for code in good_cases:
            self.checker.process_tokens(tokenize_str(code))

def test_single_tri_quote_string_literal_cfg_double(self):
    test_str = """x = '''test'''"""
    msg = Message(msg_id='invalid-triple-quote', line=1, args=(TRI_Q_SING, TRI_Q_DOUB))
    with self.assertAddsMessages(msg):
        self.checker.process_tokens(tokenize_str(test_str))
        self.checker.leave_module(None)

def testSpacesAllowedInsideSlices(self):
    good_cases = [
        '[a:b]\n',
        '[a : b]\n',
        '[a : ]\n',
        '[:a]\n',
        '[:]\n',
        '[::]\n',
    ]
    with self.assertNoMessages():
        for code in good_cases:
            self.checker.process_tokens(tokenize_str(code))

def check_module(self, test_str, *messages):
    """Test that the module-level docstring is linted correctly."""
    stmt = astroid.parse(test_str)
    if messages:
        ctx = self.assertAddsMessages(*messages)
    else:
        ctx = self.assertNoMessages()
    with ctx:
        self.checker.process_tokens(tokenize_str(test_str))
        self.checker.visit_module(stmt)

def testKeywordSpacingGood(self):
    with self.assertNoMessages():
        self.checker.process_tokens(tokenize_str('foo(foo=bar)\n'))
        self.checker.process_tokens(tokenize_str('foo(foo: int = bar)\n'))
        self.checker.process_tokens(tokenize_str('foo(foo: Dict[int, str] = bar)\n'))
        self.checker.process_tokens(tokenize_str('foo(foo: \'int\' = bar)\n'))
        self.checker.process_tokens(tokenize_str('foo(foo: Dict[int, \'str\'] = bar)\n'))
        self.checker.process_tokens(tokenize_str('lambda x=1: x\n'))

def test_multi_line_double_tri_quote_string_literal_cfg_single(self):
    test_str = '''x = """
this is a multi-line
test string
"""'''
    msg = Message(msg_id='invalid-triple-quote', line=1, args=(TRI_Q_DOUB, TRI_Q_SING))
    with self.assertAddsMessages(msg):
        self.checker.process_tokens(tokenize_str(test_str))
        self.checker.leave_module(None)

def _check(self, test_str, visiter, *messages):
    """Method to perform the actual test check for those methods that
    utilize a visitor.
    """
    stmt = astroid.extract_node(test_str)
    if messages:
        ctx = self.assertAddsMessages(*messages)
    else:
        ctx = self.assertNoMessages()
    with ctx:
        self.checker.process_tokens(tokenize_str(test_str))
        visiter(stmt)

def testCheckKeywordParensHandlesValidCases(self):
    self.checker._keywords_with_parens = set()
    cases = [
        'if foo:',
        'if foo():',
        'if (x and y) or z:',
        'assert foo()',
        'assert ()',
        'if (1, 2) in (3, 4):',
        'if (a or b) in c:',
        'return (x for x in x)',
        'if (x for x in x):',
        'for x in (x for x in x):',
        'not (foo or bar)',
        'not (foo or bar) and baz',
    ]
    with self.assertNoMessages():
        for code in cases:
            self.checker._check_keyword_parentheses(tokenize_str(code), 0)

def testCheckKeywordParensHandlesUnnecessaryParens(self):
    self.checker._keywords_with_parens = set()
    cases = [
        (Message('superfluous-parens', line=1, args='if'),
         'if (foo):', 0),
        (Message('superfluous-parens', line=1, args='if'),
         'if ((foo, bar)):', 0),
        (Message('superfluous-parens', line=1, args='if'),
         'if (foo(bar)):', 0),
        (Message('superfluous-parens', line=1, args='return'),
         'return ((x for x in x))', 0),
        (Message('superfluous-parens', line=1, args='not'),
         'not (foo)', 0),
        (Message('superfluous-parens', line=1, args='not'),
         'if not (foo):', 1),
        (Message('superfluous-parens', line=1, args='if'),
         'if (not (foo)):', 0),
        (Message('superfluous-parens', line=1, args='not'),
         'if (not (foo)):', 2),
    ]
    for msg, code, offset in cases:
        with self.assertAddsMessages(msg):
            self.checker._check_keyword_parentheses(tokenize_str(code), offset)

def test_skip_python_coding_comments(self):
    self.checker.process_tokens(tokenize_str(
        '# -*- coding: utf-8 -*-'))
    assert self.linter.release_messages() == []
    self.checker.process_tokens(tokenize_str(
        '# coding=utf-8'))
    assert self.linter.release_messages() == []
    self.checker.process_tokens(tokenize_str(
        '# vim: set fileencoding=utf-8 :'))
    assert self.linter.release_messages() == []

    # Now with a shebang first
    self.checker.process_tokens(tokenize_str(
        '#!/usr/bin/env python\n# -*- coding: utf-8 -*-'))
    assert self.linter.release_messages() == []
    self.checker.process_tokens(tokenize_str(
        '#!/usr/bin/env python\n# coding=utf-8'))
    assert self.linter.release_messages() == []
    self.checker.process_tokens(tokenize_str(
        '#!/usr/bin/env python\n# vim: set fileencoding=utf-8 :'))
    assert self.linter.release_messages() == []

def test_single_quote_string_literal_cfg_single(self):
    test_str = """x = 'test'"""
    with self.assertNoMessages():
        self.checker.process_tokens(tokenize_str(test_str))

def _test_token_message(self, code, symbolic_message):
    tokens = testutils.tokenize_str(code)
    message = testutils.Message(symbolic_message, line=1)
    with self.assertAddsMessages(message):
        self.checker.process_tokens(tokens)

def test_skip_email_address(self):
    self.checker.process_tokens(tokenize_str('# [email protected]'))
    assert self.linter.release_messages() == []

def test_skip_words_with_numbers(self):
    self.checker.process_tokens(tokenize_str('\n# 0ne\n# Thr33\n# Sh3ll'))
    assert self.linter.release_messages() == []

def test_double_quote_string_literal_cfg_double_with_escaping(self):
    test_str = '''x = "this is a \\"test\\" string"'''
    with self.assertNoMessages():
        self.checker.process_tokens(tokenize_str(test_str))

def test_skip_urls(self):
    self.checker.process_tokens(tokenize_str('# https://github.com/rfk/pyenchant'))
    assert self.linter.release_messages() == []

def test_skip_top_level_pylint_enable_disable_comments(self):
    self.checker.process_tokens(tokenize_str(
        '# Line 1\n Line 2\n# pylint: disable=ungrouped-imports'))
    assert self.linter.release_messages() == []

def testKeywordSpacingGood(self):
    with self.assertNoMessages():
        self.checker.process_tokens(tokenize_str('foo(foo=bar)\n'))
        self.checker.process_tokens(tokenize_str('lambda x=1: x\n'))

def test_skip_shebangs(self):
    self.checker.process_tokens(tokenize_str('#!/usr/bin/env python'))
    assert self.linter.release_messages() == []

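# A minimal sketch of the tokenize_str helper the tests above rely on, in case
# it is not already imported from pylint.testutils in this version (an
# assumption, not the definitive upstream definition). It turns a source
# snippet into the token stream that process_tokens() consumes.
import tokenize
from io import StringIO


def tokenize_str(code):
    # Tokenize the snippet line by line and return the full token list.
    return list(tokenize.generate_tokens(StringIO(code).readline))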