def definition(correct, correct_start, path):
    """Resolve every whitespace-separated expression in `correct` and return
    the set of `repr`-comparable definitions they evaluate to."""
    # NOTE(review): uses `self`, `script`, `evaluator`, `comparison` from an
    # enclosing scope — presumably a closure inside a test-case method;
    # confirm against the surrounding file.
    should_be = set()
    for match in re.finditer('(?:[^ ]+)', correct):
        string = match.group(0)
        # Parse the single expression on its own, then move it to the test's
        # line so evaluation happens at the right position.
        parser = parse(string, start_symbol='eval_input', error_recovery=False)
        parser.get_root_node().move(self.line_nr)
        element = parser.get_root_node()
        module_context = script._get_module()
        # The context shouldn't matter for the test results.
        user_context = get_user_scope(module_context, (self.line_nr, 0))
        if user_context.api_type == 'function':
            user_context = user_context.get_function_execution()
        element.parent = user_context.tree_node
        results = evaluator.eval_element(user_context, element)
        if not results:
            # NOTE(review): `match.string` is the *whole* searched string,
            # not the unresolved token — `string` was probably intended.
            raise Exception('Could not resolve %s on line %s'
                            % (match.string, self.line_nr - 1))
        should_be |= set(Definition(evaluator, r.name)
                         for r in results)
    debug.dbg('Finished getting types', color='YELLOW')
    # Because the objects have different ids, `repr`, then compare.
    should = set(comparison(r) for r in should_be)
    return should
def test_incomplete_list_comprehension():
    """Shouldn't raise an error, same bug as #418."""
    # The old parser produced a statement here; the new parser only emits
    # statements for valid input, so we only see error nodes.
    module = parse('(1 for def')
    node_types = [child.type for child in module.children]
    assert node_types == ['error_node', 'error_node', 'newline', 'endmarker']
def _load_faked_module(module):
    """Load (and cache) the ``fake/*.pym`` stub matching ``module``.

    Returns the parsed fake module, or ``None`` (also cached) when no fake
    file exists for this module name.
    """
    module_name = module.__name__
    # Python 2's __builtin__ shares the fake file of Python 3's builtins.
    if module_name == '__builtin__' and not is_py3:
        module_name = 'builtins'

    try:
        # Already loaded — or already known to be missing (None).
        return modules[module_name]
    except KeyError:
        path = os.path.dirname(os.path.abspath(__file__))
        try:
            with open(os.path.join(path, 'fake', module_name) + '.pym') as f:
                source = f.read()
        except IOError:
            # Cache the miss so the filesystem isn't probed again.
            modules[module_name] = None
            return
        modules[module_name] = m = parse(unicode(source))

        if module_name == 'builtins' and not is_py3:
            # There are two implementations of `open` for either python 2/3.
            # -> Rename the python2 version (`look at fake/builtins.pym`).
            open_func = _search_scope(m, 'open')
            open_func.children[1].value = 'open_python3'
            open_func = _search_scope(m, 'open_python2')
            open_func.children[1].value = 'open'
        return m
def node(self, request):
    """Fixture: parse the parametrized source and return its first node."""
    params = request.param
    request.keywords['expected'] = params[1]
    result = parse(dedent(params[0])).children[0]
    # Unwrap simple statements so tests see the interesting child directly.
    if result.type == 'simple_stmt':
        result = result.children[0]
    return result
def test_get_call_signature(code, call_signature):
    """Both call-signature helpers agree with the expected string."""
    target = parse(code).children[0]
    # Drill into simple statements to reach the def/class node itself.
    if target.type == 'simple_stmt':
        target = target.children[0]
    assert parser_utils.get_call_signature(target) == call_signature
    expected_doc = call_signature + '\n\n'
    assert parser_utils.get_doc_with_call_signature(target) == expected_doc
def test_get_code():
    """Use the same code that the parser also generates, to compare"""
    s = '''"""a docstring"""
class SomeClass(object, mixin):
    def __init__(self):
        self.xy = 3.0
        """statement docstr"""
    def some_method(self):
        return 1
    def yield_method(self):
        while hasattr(self, 'xy'):
            yield True
        for x in [1, 2]:
            yield x
    def empty(self):
        pass
class Empty:
    pass
class WithDocstring:
    """class docstr"""
    pass
def method_with_docstring():
    """class docstr"""
    pass
'''
    # Round trip: parsing and regenerating must reproduce the input exactly.
    assert parse(s).get_code() == s
def test_python2_octal():
    """Old-style octal literals are only valid syntax on Python 2."""
    stmt = parse('0660').children[0]
    if not is_py3:
        # Python 2: parsed as a plain number literal.
        assert stmt.children[0].type == 'number'
    else:
        # Python 3: `0660` is a syntax error, recovered as an error node.
        assert stmt.type == 'error_node'
def test_end_pos_one_line(self):
    """A one-line string token ends right after its closing quote."""
    module = parse(dedent('''
    def testit():
        a = "huhu"
    '''))
    funcdef = module.subscopes[0]
    string_token = funcdef.statements[0].children[2]
    assert string_token.end_pos == (3, 14)
def _fix_forward_reference(context, node):
    """Resolve a PEP 484 string (forward reference) annotation.

    If ``node`` evaluates to exactly one compiled string object, parse that
    string as an expression and return the new node, attached to the
    context's tree.  In every other case ``node`` is returned unchanged.
    """
    evaled_nodes = context.eval_node(node)
    if len(evaled_nodes) != 1:
        # Fix: the two adjacent literals previously rendered a double space
        # ("... 1 object,  not ...") in the warning message.
        debug.warning("Eval'ed typing index %s should lead to 1 object, "
                      "not %s" % (node, evaled_nodes))
        return node

    evaled_node = list(evaled_nodes)[0]
    if isinstance(evaled_node, compiled.CompiledObject) and \
            isinstance(evaled_node.obj, str):
        try:
            new_node = parse(
                _compatibility.unicode(evaled_node.obj),
                start_symbol='eval_input',
                error_recovery=False
            )
        except ParserSyntaxError:
            # A broken annotation string: keep the original node.
            debug.warning('Annotation not parsed: %s' % evaled_node.obj)
            return node
        else:
            # Move the parsed expression to the end of the module so its
            # positions don't clash with existing nodes.
            module = node.get_root_node()
            new_node.move(module.end_pos[0])
            new_node.parent = context.tree_node
            return new_node
    else:
        return node
def test_dont_break_imports_without_namespaces():
    """
    The code checking for ``from __future__ import absolute_import``
    shouldn't assume that all imports have non-``None`` namespaces.
    """
    source = "from __future__ import absolute_import\nimport xyzzy"
    module = parse(source)
    assert module.has_explicit_absolute_import()
def test_end_pos_line(self):
    """end_pos follows the growing whitespace in each call (jedi issue #150)."""
    # Each line inserts one more space, so the end column grows with the row.
    source = "x()\nx( )\nx(  )\nx (  )"
    module = parse(source)
    # children[:-1] skips the trailing endmarker.
    for offset, simple_stmt in enumerate(module.children[:-1]):
        expr_stmt = simple_stmt.children[0]
        assert expr_stmt.end_pos == (offset + 1, offset + 3)
def test_round_trip():
    """get_code() reproduces the input source exactly, docstring included."""
    source = dedent('''
    def x():
        """hahaha"""
    func''')
    module = parse(source)
    assert module.get_code() == source
def test_end_pos_multi_line(self):
    """A multi-line string token's end_pos lies on its final line."""
    module = parse(dedent('''
    def testit():
        a = """huhu
    asdfasdf""" + "h"
    '''))
    funcdef = module.subscopes[0]
    string_token = funcdef.statements[0].children[2].children[0]
    assert string_token.end_pos == (4, 11)
def test_basic_parsing():
    """Validate the parsing features"""
    # Parse the shared feature sample and require a lossless round trip.
    module = parse(code_basic_features)
    diff_code_assert(code_basic_features, module.get_code())
def test_end_pos_one_line(self):
    """The string token on a single line ends just past its quote."""
    parsed_module = parse(dedent('''
    def testit():
        a = "huhu"
    '''))
    statement = parsed_module.subscopes[0].statements[0]
    assert statement.children[2].end_pos == (3, 14)
def _get_module_node(self):
    """Parse the current source into a module node."""
    # Disk cache stays off: the script being edited changes constantly.
    # The diff cache keeps incremental re-parsing fast instead.
    module_node = parse(
        code=self._source,
        path=self.path,
        grammar=self._grammar,
        cache=False,
        diff_cache=True,
    )
    return module_node
def check_p(src, number_parsers_used, number_of_splits=None, number_of_misses=0):
    """Parse ``src``, assert the code round-trips, return the module node.

    The counter arguments mirror the diff-parser helpers and are kept for
    signature compatibility.  # NOTE(review): unused here — confirm callers.
    """
    number_of_splits = (number_parsers_used
                        if number_of_splits is None else number_of_splits)
    module_node = parse(src)
    assert module_node.get_code() == src
    return module_node
def test_end_pos_one_line(self):
    """end_pos of a one-line string, reached via the expr_stmt's rhs."""
    module = parse(dedent('''
    def testit():
        a = "huhu"
    '''))
    suite = next(module.iter_funcdefs()).get_suite()
    rhs = suite.children[-1].children[0].get_rhs()
    assert rhs.end_pos == (3, 14)
def test_open_string_literal(code):
    """Testing mostly if removing the last newline works."""
    all_lines = splitlines(code, keepends=True)
    expected_end = (len(all_lines), len(all_lines[-1]))
    module = parse(code)
    assert module.get_code() == code
    # Module and its second child (endmarker region) share the same end.
    assert module.end_pos == expected_end
    assert module.children[1].end_pos == expected_end
def test_carriage_return_statements():
    """A trailing comment must not leak into the statement's code (CRLF)."""
    source = dedent('''
    foo = 'ns1!'

    # this is a namespace package
    ''')
    crlf_source = source.replace('\n', '\r\n')
    first_stmt = parse(crlf_source).children[0]
    assert '#' not in first_stmt.get_code()
def test_carriage_return_statements():
    """The comment stays out of the statement even with CRLF endings."""
    source = dedent('''
    foo = 'ns1!'

    # this is a namespace package
    ''')
    crlf_source = source.replace('\n', '\r\n')
    statement = parse(crlf_source).statements[0]
    assert '#' not in statement.get_code()
def test_end_pos_multi_line(self):
    """end_pos of a multi-line string leaf points past the closing quotes."""
    module = parse(dedent('''
    def testit():
        a = """huhu
    asdfasdf""" + "h"
    '''))
    suite = next(module.iter_funcdefs()).get_suite()
    expr_stmt = suite.children[1].children[0]
    leaf = expr_stmt.get_rhs().children[0]
    assert leaf.end_pos == (4, 11)
def _load_module(evaluator, path, python_object):
    """Parse the module at ``path`` and register it on the evaluator."""
    parsed = parse(grammar=evaluator.grammar, path=path,
                   cache=True, diff_cache=True)
    module = parsed.get_root_node()
    # Key the cache by the *runtime* module name of the python object.
    python_module = inspect.getmodule(python_object)
    evaluator.modules[python_module.__name__] = module
    return module
def test_end_pos_one_line(self):
    """The assignment's right-hand string ends at column 14 of line 3."""
    module = parse(dedent('''
    def testit():
        a = "huhu"
    '''))
    funcdef = next(module.iter_funcdefs())
    last_stmt = funcdef.get_suite().children[-1]
    assert last_stmt.children[0].get_rhs().end_pos == (3, 14)
def test_end_pos_multi_line(self):
    """The triple-quoted string ends on line 4, column 11."""
    parsed_module = parse(dedent('''
    def testit():
        a = """huhu
    asdfasdf""" + "h"
    '''))
    statement = parsed_module.subscopes[0].statements[0]
    assert statement.children[2].children[0].end_pos == (4, 11)
def test_user_statement_on_import():
    """github #285"""
    source = "from datetime import (\n    time)"
    # Both positions lie inside the parenthesized import continuation.
    for position in [(2, 1), (2, 4)]:
        module = parse(source)
        stmt = parser_utils.get_statement_of_position(module, position)
        assert isinstance(stmt, tree.Import)
        defined = [name.value for name in stmt.get_defined_names()]
        assert defined == ['time']
def test_end_pos():
    """A funcdef spans from its `def` line up to the following line."""
    source = dedent('''
    x = ['a', 'b', 'c']
    def func():
        y = None
    ''')
    module = parse(source)
    funcdef = next(module.iter_funcdefs())
    assert funcdef.start_pos == (3, 0)
    assert funcdef.end_pos == (5, 0)
def test_end_pos():
    """Start/end positions of the only subscope in the module."""
    source = dedent('''
    x = ['a', 'b', 'c']
    def func():
        y = None
    ''')
    module = parse(source)
    func_scope = module.subscopes[0]
    assert func_scope.start_pos == (3, 0)
    assert func_scope.end_pos == (5, 0)
def check(src, result):
    """Python 2 tuple params should be ignored for now."""
    version_string = '%s.%s' % sys.version_info[:2]
    module = parse(src, grammar=load_grammar(version_string))
    if is_py3:
        # Tuple params are a syntax error on py3: no funcdef survives.
        assert not list(module.iter_funcdefs())
    else:
        # We don't want b and c to be a part of the param enumeration. Just
        # ignore them, because it's not what we want to support in the
        # future.
        funcdef = next(module.iter_funcdefs())
        assert [param.name.value for param in funcdef.params] == result
def test_sys_path_with_modifications():
    """A buildout egg path shows up in the module's modified sys.path."""
    code = dedent("""
        import os
        """)
    script_path = os.path.abspath(os.path.join(os.curdir, 'module_name.py'))
    module_node = parse(code, path=script_path)
    evaluator = Evaluator(load_grammar())
    module_context = ModuleContext(evaluator, module_node, path=script_path)
    paths = sys_path_with_modifications(module_context.evaluator,
                                        module_context)
    assert '/tmp/.buildout/eggs/important_package.egg' in paths
def test_end_pos_multi_line(self):
    """The first string leaf of the concatenation ends at (4, 11)."""
    module = parse(dedent('''
    def testit():
        a = """huhu
    asdfasdf""" + "h"
    '''))
    funcdef = next(module.iter_funcdefs())
    expr_stmt = funcdef.get_suite().children[1].children[0]
    first_leaf = expr_stmt.get_rhs().children[0]
    assert first_leaf.end_pos == (4, 11)
def check(src, result):
    """Python 2 tuple params should be ignored for now."""
    version_string = '%s.%s' % sys.version_info[:2]
    module = parse(src, grammar=load_grammar(version_string))
    if is_py3:
        # No funcdef at all on py3: tuple params don't parse.
        assert not module.subscopes
    else:
        # We don't want b and c to be a part of the param enumeration. Just
        # ignore them, because it's not what we want to support in the
        # future.
        param_names = [str(param.name)
                       for param in module.subscopes[0].params]
        assert param_names == result
def _load_module(evaluator, path, python_object):
    """Parse ``path`` and cache the root node under the object's module name."""
    module = parse(
        grammar=evaluator.grammar,
        path=path,
        cache=True,
        diff_cache=True
    ).get_root_node()
    owning_module = inspect.getmodule(python_object)
    evaluator.modules[owning_module.__name__] = module
    return module
def assert_params(param_string, **wanted_dct):
    """Parse a def with ``param_string`` params; compare name -> default."""
    source = dedent('''
    def x(%s):
        pass
    ''') % param_string
    module = parse(source)
    funcdef = module.subscopes[0]
    defaults = {
        param.name.value: param.default and param.default.get_code()
        for param in funcdef.params
    }
    assert defaults == wanted_dct
    # The parameter list must also survive a code round trip.
    assert module.get_code() == source
def test_end_pos_error_correction():
    """
    Source code without ending newline are given one, because the Python
    grammar needs it. However, they are removed again. We still want the
    right end_pos, even if something breaks in the parser (error
    correction).
    """
    source = 'def x():\n .'
    module = parse(source)
    funcdef = module.children[0]
    assert funcdef.type == 'funcdef'
    assert funcdef.end_pos == (2, 2)
    assert module.end_pos == (2, 2)
def _get_typing_replacement_module():
    """
    The idea is to return our jedi replacement for the PEP-0484 typing module
    as discussed at https://github.com/davidhalter/jedi/issues/663
    """
    global _typing_module
    # Lazily parse the replacement module once and reuse it afterwards.
    if _typing_module is not None:
        return _typing_module
    typing_path = os.path.abspath(os.path.join(__file__, "../jedi_typing.py"))
    with open(typing_path) as f:
        code = _compatibility.unicode(f.read())
    _typing_module = parse(code)
    return _typing_module
def test_carriage_return_splitting():
    """used_names must still be collected when lines end with CRLF."""
    source = u(dedent('''
        "string"

        class Foo():
            pass
        '''))
    source = source.replace('\n', '\r\n')
    module = parse(source)
    found = [name.value
             for name_list in module.used_names.values()
             for name in name_list]
    assert found == ['Foo']
def check(src, result):
    """Python 2 tuple params should be ignored for now."""
    grammar = load_grammar('%s.%s' % sys.version_info[:2])
    module = parse(src, grammar=grammar)
    if is_py3:
        # On py3 the tuple-param def doesn't parse into a funcdef.
        assert list(module.iter_funcdefs()) == []
    else:
        # We don't want b and c to be a part of the param enumeration. Just
        # ignore them, because it's not what we want to support in the
        # future.
        params = next(module.iter_funcdefs()).params
        assert [param.name.value for param in params] == result
def _get_paths_from_buildout_script(evaluator, buildout_script_path):
    """Yield sys.path entries contributed by a buildout-generated script."""
    try:
        module_node = parse(
            path=buildout_script_path,
            grammar=evaluator.grammar,
            cache=True
        )
    except IOError:
        # Unreadable script: warn and contribute no paths.
        debug.warning('Error trying to read buildout_script: %s',
                      buildout_script_path)
        return

    # Imported here to avoid a circular import at module load time.
    from jedi.evaluate.representation import ModuleContext
    module_context = ModuleContext(evaluator, module_node,
                                   buildout_script_path)
    for found_path in _check_module(module_context):
        yield found_path
def test_hex_values_in_docstring():
    """A raw \\xff escape in a docstring decodes per Python version."""
    source = r'''
        def foo(object):
            """
             \xff
            """
            return 1
        '''
    funcdef = next(parse(source).iter_funcdefs())
    doc = parser_utils.clean_scope_docstring(funcdef)
    if is_py3:
        assert doc == '\xff'
    else:
        # The original file carried the raw 0xff byte; '\xff' is the same
        # byte string on Python 2.
        assert doc == u('\xff')
def _load_module(evaluator, path=None, code=None, sys_path=None,
                 parent_module=None):
    """Return a ModuleContext for parseable sources, else a compiled module."""
    if sys_path is None:
        sys_path = evaluator.sys_path

    dotted_path = path and compiled.dotted_from_fs_path(path, sys_path)
    parseable = (path is not None
                 and path.endswith(('.py', '.zip', '.egg'))
                 and dotted_path not in settings.auto_import_modules)
    if not parseable:
        # Binary extensions and auto-import modules go the compiled route.
        return compiled.load_module(evaluator, path)

    module_node = parse(code=code, path=path, cache=True, diff_cache=True)
    # Imported here to avoid a circular import at module load time.
    from jedi.evaluate.representation import ModuleContext
    return ModuleContext(evaluator, module_node, path=path)