Example #1
def test_basic_parsing():
    def compare(string):
        """Generates the AST object and then regenerates the code."""
        assert Parser(load_grammar(), string).module.get_code() == string

    compare(u('\na #pass\n'))
    compare(u('wblabla* 1\t\n'))
    compare(u('def x(a, b:3): pass\n'))
    compare(u('assert foo\n'))
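All of these snippets exercise the u() compatibility helper (presumably jedi._compatibility.u, the module several examples import from). A minimal sketch of the behavior it presumably provides, assuming the usual Python 2/3 text shim rather than the library's exact code:

import sys

is_py3 = sys.version_info[0] >= 3

def u(string):
    # Normalize to the unicode text type: a no-op on Python 3, a UTF-8
    # decode of byte strings on Python 2.
    if is_py3:
        return str(string)
    if isinstance(string, str):  # a Python 2 byte string
        return string.decode('utf-8')
    return string  # already unicode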
Example #2
def get_stack_at_position(grammar, code_lines, module, pos):
    """
    Returns the possible node names (e.g. import_from, xor_test or yield_stmt).
    """
    user_stmt = module.get_statement_for_position(pos)

    if user_stmt is not None and user_stmt.type in ('indent', 'dedent'):
        code = u('')
    else:
        if user_stmt is None:
            user_stmt = module.get_leaf_for_position(pos,
                                                     include_prefixes=True)
        if pos <= user_stmt.start_pos:
            try:
                leaf = user_stmt.get_previous_leaf()
            except IndexError:
                pass
            else:
                user_stmt = module.get_statement_for_position(leaf.start_pos)

        if user_stmt.type == 'error_leaf' or user_stmt.type == 'string':
            # Error leafs cannot be parsed, completion in strings is also
            # impossible.
            raise OnErrorLeaf(user_stmt)

        code = _get_code(code_lines, user_stmt.start_pos, pos)
        if code == ';':
            # ; cannot be parsed.
            code = u('')

        # Remove whitespace at the end. Necessary, because the tokenizer will parse
        # an error token (there's no new line at the end in our case). This doesn't
        # alter any truth about the valid tokens at that position.
        code = code.strip('\t ')

    class EndMarkerReached(Exception):
        pass

    def tokenize_without_endmarker(code):
        tokens = tokenize.source_tokens(code, use_exact_op_types=True)
        for token_ in tokens:
            if token_[0] == token.ENDMARKER:
                raise EndMarkerReached()
            elif token_[0] == token.DEDENT:
                # Ignore those. Error statements should not contain them, if
                # they do it's for cases where an indentation happens and
                # before the endmarker we still see them.
                pass
            else:
                yield token_

    p = parser.Parser(grammar, code, start_parsing=False)
    try:
        p.parse(tokenizer=tokenize_without_endmarker(code))
    except EndMarkerReached:
        return Stack(p.stack)
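A hedged sketch of how this function might be driven, reusing only names that appear in the surrounding examples (load_grammar, ParserWithRecovery); the source text and cursor position are made-up values:

grammar = load_grammar()
source = u('import os\nos.')
code_lines = source.splitlines(True)  # keep the line endings
module = ParserWithRecovery(grammar, source).module
# Parser stack that is open right after `os.` (line 2, column 3).
stack = get_stack_at_position(grammar, code_lines, module, (2, 3))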
Example #3
def test_unicode_attribute(Script):
    """ github jedi-vim issue #94 """
    s1 = u('#-*- coding: utf-8 -*-\nclass Person():\n'
           '    name = "e"\n\nPerson().name.')
    completions1 = Script(s1).completions()
    assert 'strip' in [c.name for c in completions1]
    s2 = u('#-*- coding: utf-8 -*-\nclass Person():\n'
           '    name = "é"\n\nPerson().name.')
    completions2 = Script(s2).completions()
    assert 'strip' in [c.name for c in completions2]
Example #4
def test_multibyte_script(Script):
    """ `jedi.Script` must accept multi-byte string source. """
    try:
        code = u("import datetime; datetime.d")
        comment = u("# multi-byte comment あいうえおä")
        s = (u('%s\n%s') % (code, comment)).encode('utf-8')
    except NameError:
        pass  # python 3 has no unicode method
    else:
        assert len(Script(s, 1, len(code)).completions())
Example #6
def test_module():
    module = Parser(load_grammar(), u("asdf"), "example.py").module
    name = module.name
    assert str(name) == "example"
    assert name.start_pos == (1, 0)
    assert name.end_pos == (1, 7)

    module = Parser(load_grammar(), u("asdf")).module
    name = module.name
    assert str(name) == ""
    assert name.start_pos == (1, 0)
    assert name.end_pos == (1, 0)
Example #7
def test_module():
    module = Parser(load_grammar(), u('asdf'), 'example.py').module
    name = module.name
    assert str(name) == 'example'
    assert name.start_pos == (1, 0)
    assert name.end_pos == (1, 7)

    module = Parser(load_grammar(), u('asdf')).module
    name = module.name
    assert str(name) == ''
    assert name.start_pos == (1, 0)
    assert name.end_pos == (1, 0)
Example #8
def test_module():
    module = ParserWithRecovery(load_grammar(), u('asdf'), 'example.py').module
    name = module.name
    assert str(name) == 'example'
    assert name.start_pos == (1, 0)
    assert name.end_pos == (1, 7)

    module = ParserWithRecovery(load_grammar(), u('asdf')).module
    name = module.name
    assert str(name) == ''
    assert name.start_pos == (1, 0)
    assert name.end_pos == (1, 0)
Example #9
def test_module():
    module = Parser(u('asdf'), 'example.py', no_docstr=True).module
    name = module.name
    assert str(name) == 'example'
    assert name.start_pos == (0, 0)
    assert name.end_pos == (0, 7)

    module = Parser(u('asdf'), no_docstr=True).module
    name = module.name
    assert str(name) == ''
    assert name.start_pos == (0, 0)
    assert name.end_pos == (0, 0)
Example #11
def test_hex_values_in_docstring():
    source = r'''
        def foo(object):
            """
             \xff
            """
            return 1
        '''

    doc = Parser(load_grammar(), dedent(u(source))).module.subscopes[0].raw_doc
    if is_py3:
        assert doc == '\xff'
    else:
        assert doc == u('�')
Example #13
def find_return_types(module_context, func):
    """
    Determines a set of potential return types for `func` using docstring hints
    :type evaluator: jedi.evaluate.Evaluator
    :type param: jedi.parser.tree.Param
    :rtype: list
    >>> from jedi.evaluate.docstrings import *  # NOQA
    >>> from jedi.evaluate.docstrings import _search_param_in_docstr
    >>> from jedi.evaluate.docstrings import _evaluate_for_statement_string
    >>> from jedi.evaluate.docstrings import _search_return_in_gooogledocstr
    >>> from jedi.evaluate.docstrings import _search_return_in_numpydocstr
    >>> from jedi._compatibility import builtins
    >>> source = open(jedi.evaluate.docstrings.__file__.replace('.pyc', '.py'), 'r').read()
    >>> script = jedi.Script(source)
    >>> evaluator = script._evaluator
    >>> func = script._get_module().names_dict['find_return_types'][0].parent
    >>> types = find_return_types(evaluator, func)
    >>> print('types = %r' % (types,))
    >>> assert len(types) == 1
    >>> assert types[0].base.obj is builtins.list
    """
    def search_return_in_docstr(docstr):
        # Check for Sphinx/Epydoc return hint
        for p in DOCSTRING_RETURN_PATTERNS:
            match = p.search(docstr)
            if match:
                return [_strip_rst_role(match.group(1))]
        found = []

        if not found:
            # Check for numpy style return hint
            found = _search_return_in_numpydocstr(docstr)
        return found

    try:
        docstr = u(func.raw_doc)
    except AttributeError:
        docstr = u(func.doc)
    types = []
    for type_str in search_return_in_docstr(docstr):
        if is_module_installed('jedi', '>=0.10.0;<0.11'):
            type_ = _evaluate_for_statement_string(module_context, type_str)
        else:
            module = func.get_parent_until()
            type_ = _evaluate_for_statement_string(module_context, type_str,
                                                   module)
        types.extend(type_)
    return types
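For intuition, the kind of Sphinx-style return hint that search_return_in_docstr is written to pick up (a hypothetical target function; the numpy-style fallback would instead match a Returns section):

def parse_port(value):
    """Convert a string to a TCP port number.

    :rtype: int
    """
    return int(value)

# search_return_in_docstr(parse_port.__doc__) should yield ['int'], which
# find_return_types then evaluates to the builtin int.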
Example #14
def test_end_pos_one_line(self):
    parsed = Parser(load_grammar(), dedent(u('''
    def testit():
        a = "huhu"
    ''')))
    tok = parsed.module.subscopes[0].statements[0].children[2]
    assert tok.end_pos == (3, 14)
Example #15
def test_function_whitespace(self):
    # Test function definition whitespace identification
    fundef = dedent(
        u(
            """
    def test_whitespace(*args, **kwargs):
        x = 1
        if x > 0:
            print(True)
    """
        )
    )
    fundef_io = StringIO(fundef)
    tokens = tokenize.generate_tokens(fundef_io.readline)
    token_list = list(tokens)
    for _, value, _, prefix in token_list:
        if value == "test_whitespace":
            assert prefix == " "
        if value == "(":
            assert prefix == ""
        if value == "*":
            assert prefix == ""
        if value == "**":
            assert prefix == " "
        if value == "print":
            assert prefix == "        "
        if value == "if":
            assert prefix == "    "
Example #16
    def check_call_for_usage(call):
        stmt = call.parent
        while not isinstance(stmt.parent, pr.IsScope):
            stmt = stmt.parent
        # New definition, call cannot be a part of stmt
        if len(call.name) == 1 and call.execution is None \
                and call.name in stmt.get_defined_names():
            # Class params are not definitions (like function params). They
            # are super classes, that need to be resolved.
            if not (isinstance(stmt, pr.Param) and isinstance(stmt.parent, pr.Class)):
                return

        follow = []  # There might be multiple search_name's in one call_path
        call_path = list(call.generate_call_path())
        for i, name in enumerate(call_path):
            # name is `pr.NamePart`.
            if u(name) == search_name:
                follow.append(call_path[:i + 1])

        for call_path in follow:
            follow_res, search = evaluator.goto(call.parent, call_path)
            # names can change (getattr stuff), therefore filter names that
            # don't match `search`.

            # TODO add something like that in the future - for now usages are
            # completely broken anyway.
            #follow_res = [r for r in follow_res if str(r) == search]
            #print search.start_pos,search_name.start_pos
            #print follow_res, search, search_name, [(r, r.start_pos) for r in follow_res]
            follow_res = usages_add_import_modules(evaluator, follow_res, search)

            compare_follow_res = compare_array(follow_res)
            # compare to see if they match
            if any(r in compare_definitions for r in compare_follow_res):
                yield classes.Definition(evaluator, search)
Example #17
    def test_end_pos_one_line(self):
        parsed = parser.Parser(parser.load_grammar(), u('''
def testit():
    a = "huhu"
'''))
        tok = parsed.module.subscopes[0].statements[0].children[2]
        assert tok.end_pos == (3, 14)
Example #18
    def test_end_pos_one_line(self):
        parsed = parser.Parser(u('''
def testit():
    a = "huhu"
'''))
        tok = parsed.module.subscopes[0].statements[0]._token_list[2]
        self.assertEqual(tok.end_pos, (3, 14))
Example #19
def test_newline_positions():
    endmarker = ParserWithRecovery(load_grammar(),
                                   u('a\n')).module.children[-1]
    assert endmarker.end_pos == (2, 0)
    new_line = endmarker.get_previous_leaf()
    assert new_line.start_pos == (1, 1)
    assert new_line.end_pos == (2, 0)
Example #20
def test_get_code():
    """Use the same code that the parser also generates, to compare"""
    s = u(
        '''"""a docstring"""
class SomeClass(object, mixin):
    def __init__(self):
        self.xy = 3.0
        """statement docstr"""
    def some_method(self):
        return 1
    def yield_method(self):
        while hasattr(self, 'xy'):
            yield True
        for x in [1, 2]:
            yield x
    def empty(self):
        pass
class Empty:
    pass
class WithDocstring:
    """class docstr"""
    pass
def method_with_docstring():
    """class docstr"""
    pass
'''
    )
    assert ParserWithRecovery(load_grammar(), s).module.get_code() == s
Example #21
def test_explicit_absolute_imports():
    """
    Detect modules with ``from __future__ import absolute_import``.
    """
    parser = Parser(load_grammar(),
                    u("from __future__ import absolute_import"), "test.py")
    assert parser.module.has_explicit_absolute_import
Example #22
def check(literal):
    io = StringIO(u(literal))
    tokens = tokenize.generate_tokens(io.readline)
    token_list = list(tokens)
    typ, result_literal, _, _ = token_list[0]
    assert typ == STRING
    assert result_literal == literal
Example #23
def test_end_pos(self):
    # jedi issue #150
    s = u("x()\nx( )\nx(  )\nx (  )")
    parser = Parser(s)
    for i, s in enumerate(parser.module.statements, 3):
        for c in s.expression_list():
            self.assertEqual(c.execution.end_pos[1], i)
Example #24
def test_end_pos_one_line(self):
    parsed = ParserWithRecovery(load_grammar(), dedent(u('''
    def testit():
        a = "huhu"
    ''')))
    tok = parsed.module.subscopes[0].statements[0].children[2]
    assert tok.end_pos == (3, 14)
Example #25
def raw_doc(self):
    """ Returns a cleaned version of the docstring token. """
    try:
        # Returns a literal cleaned version of the ``Token``.
        return unicode(cleandoc(literal_eval(self._doc_token.string)))
    except AttributeError:
        return u('')
Example #26
    def fp(src):
        p = FastParser(u(src))
        cache.save_parser(None, None, p, pickling=False)

        # TODO Don't change get_code, the whole thing should be the same.
        # -> Need to refactor the parser first, though.
        assert src == p.module.get_code()[:-1]
Example #27
def test_get_code():
    """Use the same code that the parser also generates, to compare"""
    s = u('''"""a docstring"""
class SomeClass(object, mixin):
    def __init__(self):
        self.xy = 3.0
        """statement docstr"""
    def some_method(self):
        return 1
    def yield_method(self):
        while hasattr(self, 'xy'):
            yield True
        for x in [1, 2]:
            yield x
    def empty(self):
        pass
class Empty:
    pass
class WithDocstring:
    """class docstr"""
    pass
def method_with_docstring():
    """class docstr"""
    pass
''')
    assert ParserWithRecovery(load_grammar(), s).module.get_code() == s
Example #28
def test_unicode_script(Script):
    """ normally no unicode objects are being used. (<=2.7) """
    s = unicode("import datetime; datetime.timedelta")
    completions = Script(s).completions()
    assert len(completions)
    assert type(completions[0].description) is unicode

    s = u("author='öä'; author")
    completions = Script(s).completions()
    x = completions[0].description
    assert type(x) is unicode

    s = u("#-*- coding: iso-8859-1 -*-\nauthor='öä'; author")
    s = s.encode('latin-1')
    completions = Script(s).completions()
    assert type(completions[0].description) is unicode
Example #29
def test_path_from_sys_path_assignment():
    SRC = dedent(
        u(
            """
        #!/usr/bin/python

        import sys
        sys.path[0:0] = [
          '/usr/lib/python3.4/site-packages',
          '/home/test/.buildout/eggs/important_package.egg'
          ]

        path[0:0] = [1]

        import important_package

        if __name__ == '__main__':
            sys.exit(important_package.main())"""
        )
    )
    grammar = load_grammar()
    p = ParserWithRecovery(grammar, SRC)
    paths = _check_module(Evaluator(grammar), p.module)
    assert 1 not in paths
    assert "/home/test/.buildout/eggs/important_package.egg" in paths
Example #32
def test_function_whitespace(self):
    # Test function definition whitespace identification
    fundef = dedent(
        u('''
    def test_whitespace(*args, **kwargs):
        x = 1
        if x > 0:
            print(True)
    '''))
    fundef_io = StringIO(fundef)
    tokens = tokenize.generate_tokens(fundef_io.readline)
    token_list = list(tokens)
    for _, value, _, prefix in token_list:
        if value == 'test_whitespace':
            assert prefix == ' '
        if value == '(':
            assert prefix == ''
        if value == '*':
            assert prefix == ''
        if value == '**':
            assert prefix == ' '
        if value == 'print':
            assert prefix == '        '
        if value == 'if':
            assert prefix == '    '
Example #34
def test_dont_break_imports_without_namespaces():
    """
    The code checking for ``from __future__ import absolute_import`` shouldn't
    assume that all imports have non-``None`` namespaces.
    """
    src = u("from __future__ import absolute_import\nimport xyzzy")
    parser = Parser(load_grammar(), src, "test.py")
    assert parser.module.has_explicit_absolute_import
Example #35
def _get_code_for_stack(code_lines, module_node, position):
    leaf = module_node.get_leaf_for_position(position, include_prefixes=True)
    # It might happen that we're on whitespace or on a comment. This means
    # that we would not get the right leaf.
    if leaf.start_pos >= position:
        if _is_on_comment(leaf, position):
            return u('')

        # If we're not on a comment simply get the previous leaf and proceed.
        try:
            leaf = leaf.get_previous_leaf()
        except IndexError:
            return u('')  # At the beginning of the file.

    is_after_newline = leaf.type == 'newline'
    while leaf.type == 'newline':
        try:
            leaf = leaf.get_previous_leaf()
        except IndexError:
            return u('')

    if leaf.type == 'error_leaf' or leaf.type == 'string':
        if leaf.start_pos[0] < position[0]:
            # On a different line, we just begin anew.
            return u('')

        # Error leafs cannot be parsed, completion in strings is also
        # impossible.
        raise OnErrorLeaf(leaf)
    else:
        if leaf == ';':
            user_stmt = leaf.parent
        else:
            user_stmt = leaf.get_definition()
        if user_stmt.parent.type == 'simple_stmt':
            user_stmt = user_stmt.parent

        if is_after_newline:
            if user_stmt.start_pos[1] > position[1]:
                # This means that it's actually a dedent and that means that we
                # start without context (part of a suite).
                return u('')

        # This is basically getting the relevant lines.
        return _get_code(code_lines, user_stmt.get_start_pos_of_prefix(),
                         position)
Example #36
    def test_end_pos_multi_line(self):
        parsed = parser.Parser(u('''
def testit():
    a = """huhu
asdfasdf""" + "h"
'''))
        tok = parsed.module.subscopes[0].statements[0]._token_list[2]
        self.assertEqual(tok.end_pos, (4, 11))
Example #37
def dbg(message, *args):
    """ Looks at the stack, to see if a debug message should be printed. """
    if debug_function and enable_notice:
        frm = inspect.stack()[1]
        mod = inspect.getmodule(frm[0])
        if not (mod.__name__ in ignored_modules):
            i = ' ' * _debug_indent
            debug_function(NOTICE, i + 'dbg: ' + message % tuple(u(repr(a)) for a in args))
Example #38
def test_round_trip():
    source = dedent('''
    def x():
        """hahaha"""
    func''')

    f = FastParser(load_grammar(), u(source))
    assert f.get_parsed_node().get_code() == source
Example #39
def test_dont_break_imports_without_namespaces():
    """
    The code checking for ``from __future__ import absolute_import`` shouldn't
    assume that all imports have non-``None`` namespaces.
    """
    src = u("from __future__ import absolute_import\nimport xyzzy")
    parser = ParserWithRecovery(load_grammar(), src, "test.py")
    assert parser.module.has_explicit_absolute_import
Example #40
def test_path_from_invalid_sys_path_assignment():
    SRC = u("""
import sys
sys.path = 'invalid'""")
    p = Parser(SRC)
    paths = _check_module(p.module)
    assert len(paths) > 0
    assert 'invalid' not in paths
Example #41
def test_user_statement_on_import():
    """github #285"""
    s = u("from datetime import (\n" "    time)")

    for pos in [(2, 1), (2, 4)]:
        p = UserContextParser(load_grammar(), s, None, pos, None).user_stmt()
        assert isinstance(p, pt.Import)
        assert [str(n) for n in p.get_defined_names()] == ["time"]
Example #42
def test_end_pos_multi_line(self):
    parsed = ParserWithRecovery(load_grammar(), dedent(u('''
    def testit():
        a = """huhu
    asdfasdf""" + "h"
    ''')))
    tok = parsed.module.subscopes[0].statements[0].children[2].children[0]
    assert tok.end_pos == (4, 11)
Example #45
def test_path_from_invalid_sys_path_assignment():
    code = dedent(u("""
        import sys
        sys.path = 'invalid'"""))

    paths = check_module_test(code)
    assert len(paths) > 0
    assert 'invalid' not in paths
Example #46
def test_end_pos_multi_line(self):
    parsed = Parser(load_grammar(), dedent(u('''
    def testit():
        a = """huhu
    asdfasdf""" + "h"
    ''')))
    tok = parsed.module.subscopes[0].statements[0].children[2].children[0]
    assert tok.end_pos == (4, 11)
Example #47
def test_user_statement_on_import():
    """github #285"""
    s = u("from datetime import (\n" "    time)")

    for pos in [(2, 1), (2, 4)]:
        p = UserContextParser(load_grammar(), s, None, pos, None).user_stmt()
        assert isinstance(p, pt.Import)
        assert [str(n) for n in p.get_defined_names()] == ['time']
Example #51
def test_carriage_return_statements():
    source = u(dedent('''
        foo = 'ns1!'

        # this is a namespace package
    '''))
    source = source.replace('\n', '\r\n')
    stmt = Parser(source).module.statements[0]
    assert '#' not in stmt.get_code()
Example #52
def warning(message, *args, **kwargs):
    format = kwargs.pop('format', True)
    assert not kwargs

    if debug_function and enable_warning:
        i = ' ' * _debug_indent
        if format:
            message = message % tuple(u(repr(a)) for a in args)
        debug_function('RED', i + 'warning: ' + message)
Example #53
def test_user_statement_on_import():
    """github #285"""
    s = u("from datetime import (\n" "    time)")

    for pos in [(2, 1), (2, 4)]:
        p = ParserWithRecovery(load_grammar(), s)
        stmt = p.module.get_statement_for_position(pos)
        assert isinstance(stmt, pt.Import)
        assert [str(n) for n in stmt.get_defined_names()] == ['time']
Example #54
def test_simple_no_whitespace(self):
    # Test a simple one line string, no preceding whitespace
    simple_docstring = u('"""simple one line docstring"""')
    simple_docstring_io = StringIO(simple_docstring)
    tokens = tokenize.generate_tokens(simple_docstring_io.readline)
    token_list = list(tokens)
    _, value, _, prefix = token_list[0]
    assert prefix == ''
    assert value == '"""simple one line docstring"""'