def get_index_types(self, index=None):
    """Return the types produced by ``__getitem__`` for *index*.

    Falls back to an empty list (with a debug warning) when the object
    defines no ``__getitem__`` subscope.
    """
    if index is None:
        args = []
    else:
        args = [index]
    try:
        types = self.execute_subscope_by_name('__getitem__', args)
    except KeyError:
        debug.warning('No __getitem__, cannot access the array.')
        return []
    return types
def _get_buildout_scripts(module_path):
    """Collect python-looking scripts from a buildout ``bin`` directory.

    Walks up from the given module looking for a ``buildout.cfg``; when one
    is found, returns the paths of all files in the project's ``bin``
    directory whose first line is a python shebang.

    :param module_path: absolute path to the module.
    :type module_path: str
    """
    project_root = _get_parent_dir_with_file(module_path, 'buildout.cfg')
    if not project_root:
        return []
    bin_path = os.path.join(project_root, 'bin')
    if not os.path.exists(bin_path):
        return []

    extra_module_paths = []
    for filename in os.listdir(bin_path):
        filepath = os.path.join(bin_path, filename)
        try:
            with open(filepath, 'r') as f:
                shebang = f.readline()
        except IOError as e:
            # either permission error or race cond. because file got deleted
            # ignore
            debug.warning(unicode(e))
            continue
        if shebang.startswith('#!') and 'python' in shebang:
            extra_module_paths.append(filepath)
    return extra_module_paths
def execute(self, obj, arguments=(), trailer=None):
    """Execute *obj* with *arguments*, consulting hard-coded stdlib handlers first.

    Returns the set of resulting types, or an empty set when *obj* is not
    callable (no ``py__call__``).
    """
    if not isinstance(arguments, param.Arguments):
        arguments = param.Arguments(self, arguments, trailer)

    if self.is_analysis:
        arguments.eval_all()

    if obj.isinstance(er.Function):
        obj = obj.get_decorated_func()

    debug.dbg('execute: %s %s', obj, arguments)
    try:
        # Some stdlib functions like super(), namedtuple(), etc. have been
        # hard-coded in Jedi to support them.
        return stdlib.execute(self, obj, arguments)
    except stdlib.NotInStdLib:
        pass

    func = getattr(obj, 'py__call__', None)
    if func is None:
        debug.warning("no execution possible %s", obj)
        return set()
    types = func(arguments)
    debug.dbg('execute result: %s in %s', types, obj)
    return types
def iter_content(self):
    """Return all types that iterating over this array-like instance may yield.

    Any concrete index is ignored: with appends etc. in play, lists/sets are
    too complicated to track per-index, so everything is merged.
    """
    items = []
    for stmt in self.var_args:
        # Follow every statement passed to the constructor.
        for typ in evaluate.follow_statement(stmt):
            if isinstance(typ, er.Instance) and len(typ.var_args):
                array = typ.var_args[0]
                if isinstance(array, ArrayInstance):
                    # prevent recursions
                    # TODO compare Modules
                    if self.var_args.start_pos != array.var_args.start_pos:
                        items += array.iter_content()
                    else:
                        debug.warning('ArrayInstance recursion', self.var_args)
                    continue
            items += evaluate.get_iterator_types([typ])

    if self.var_args.parent is None:
        return []  # generated var_args should not be checked for arrays

    # Also pick up types added later via append()/insert() etc. in the module.
    module = self.var_args.get_parent_until()
    is_list = str(self.instance.name) == 'list'
    items += _check_array_additions(self.instance, module, is_list)
    return items
def _fix_forward_reference(context, node):
    """Resolve a string annotation (forward reference) into a parsed node.

    Returns a freshly parsed node when *node* evaluates to exactly one
    compiled string object that parses cleanly; otherwise returns *node*
    unchanged.
    """
    evaled_nodes = context.eval_node(node)
    if len(evaled_nodes) != 1:
        debug.warning("Eval'ed typing index %s should lead to 1 object, "
                      " not %s" % (node, evaled_nodes))
        return node

    evaled_node = list(evaled_nodes)[0]
    if not (isinstance(evaled_node, compiled.CompiledObject)
            and isinstance(evaled_node.obj, str)):
        return node

    try:
        new_node = context.evaluator.grammar.parse(
            _compatibility.unicode(evaled_node.obj),
            start_symbol='eval_input',
            error_recovery=False
        )
    except ParserSyntaxError:
        debug.warning('Annotation not parsed: %s' % evaled_node.obj)
        return node

    # Attach the new node to the end of the module so positions stay sane.
    module = node.get_root_node()
    parser_utils.move(new_node, module.end_pos[0])
    new_node.parent = context.tree_node
    return new_node
def get_index_types(self, evaluator, index_array): # If the object doesn't have `__getitem__`, just raise the # AttributeError. if not hasattr(self.obj, "__getitem__"): debug.warning("Tried to call __getitem__ on non-iterable.") return [] if type(self.obj) not in (str, list, tuple, unicode, bytes, bytearray, dict): # Get rid of side effects, we won't call custom `__getitem__`s. return [] result = [] from jedi.evaluate.iterable import create_indexes_or_slices for typ in create_indexes_or_slices(evaluator, index_array): index = None try: index = typ.obj new = self.obj[index] except (KeyError, IndexError, TypeError, AttributeError): # Just try, we don't care if it fails, except for slices. if isinstance(index, slice): result.append(self) else: result.append(CompiledObject(new)) if not result: try: for obj in self.obj: result.append(CompiledObject(obj)) except TypeError: pass # self.obj maynot have an __iter__ method. return result
def parse_list_comp(token_iterator, token_list, start_pos, end_pos):
    """Parse a list comprehension body from *token_iterator*.

    Returns ``(ListComprehension, tok)`` on success or ``(None, tok)`` when
    the expected ``in`` keyword or a clause is missing.

    NOTE(review): references a free name ``self`` — presumably this was a
    closure/method in its original context; confirm before reusing standalone.
    """
    def parse_stmt_or_arr(token_iterator, added_breaks=(),
                          names_are_set_vars=False):
        # Parse one statement (commas allowed) and re-parent its names.
        stmt, tok = parse_stmt(token_iterator, allow_comma=True,
                               added_breaks=added_breaks)
        if stmt is not None:
            for t in stmt._token_list:
                if isinstance(t, Name):
                    t.parent = stmt
            stmt._names_are_set_vars = names_are_set_vars
        return stmt, tok

    st = Statement(self._sub_module, token_list, start_pos, end_pos,
                   set_name_parents=False)

    # The middle part binds names (`x` in `[... for x in ...]`).
    middle, tok = parse_stmt_or_arr(token_iterator, ['in'], True)
    if tok != 'in' or middle is None:
        debug.warning('list comprehension middle %s@%s', tok, start_pos)
        return None, tok

    in_clause, tok = parse_stmt_or_arr(token_iterator)
    if in_clause is None:
        debug.warning('list comprehension in @%s', start_pos)
        return None, tok

    return ListComprehension(st, middle, in_clause, self), tok
def _get_buildout_script_paths(search_path):
    """Yield python-looking scripts from a buildout ``bin`` directory.

    Walks up from *search_path* looking for a ``buildout.cfg``; when found,
    yields every file in the project's ``bin`` directory whose first line
    is a python shebang.

    :param search_path: absolute path to the module.
    :type search_path: str
    """
    project_root = _get_parent_dir_with_file(search_path, 'buildout.cfg')
    if not project_root:
        return
    bin_path = os.path.join(project_root, 'bin')
    if not os.path.exists(bin_path):
        return

    for filename in os.listdir(bin_path):
        filepath = os.path.join(bin_path, filename)
        try:
            with open(filepath, 'r') as f:
                first_line = f.readline()
        except (UnicodeDecodeError, IOError) as e:
            # Probably a binary file; permission error or race cond. because
            # file got deleted. Ignore it.
            debug.warning(unicode(e))
            continue
        if first_line.startswith('#!') and 'python' in first_line:
            yield filepath
def iter_content(self):
    """Return all types iterating over this array-like instance may yield.

    Concrete indexes are ignored: with appends etc. in play, lists/sets are
    too complicated to track per-index, so everything is merged.
    """
    items = []
    from jedi.evaluate.representation import Instance
    for stmt in self.var_args:
        # Evaluate each constructor argument and merge its iterator types.
        for typ in self._evaluator.eval_statement(stmt):
            if isinstance(typ, Instance) and len(typ.var_args):
                array = typ.var_args[0]
                if isinstance(array, ArrayInstance):
                    # prevent recursions
                    # TODO compare Modules
                    if self.var_args.start_pos != array.var_args.start_pos:
                        items += array.iter_content()
                    else:
                        debug.warning('ArrayInstance recursion %s',
                                      self.var_args)
                    continue
            items += get_iterator_types([typ])

    # TODO check if exclusion of tuple is a problem here.
    if isinstance(self.var_args, tuple) or self.var_args.parent is None:
        return []  # generated var_args should not be checked for arrays

    # Also pick up types added later via append()/insert() etc. in the module.
    module = self.var_args.get_parent_until()
    is_list = str(self.instance.name) == 'list'
    items += _check_array_additions(self._evaluator, self.instance, module,
                                    is_list)
    return items
def _parse_class(self):
    """
    The parser for a text class. Process the tokens, which follow a
    class definition.

    :return: Return a Scope representation of the tokens.
    :rtype: Class
    """
    first_pos = self.start_pos
    token_type, name_str = self.next()
    if token_type != tokenize.NAME:
        debug.warning("class: syntax err, token is not a name@%s (%s: %s)"
                      % (self.start_pos[0], tokenize.tok_name[token_type],
                         name_str))
        return None

    cname = pr.Name(self.module, [(name_str, self.start_pos)],
                    self.start_pos, self.end_pos)

    superclasses = []
    token_type, tok_str = self.next()
    if tok_str == '(':
        superclasses = self._parse_parentheses()
        token_type, tok_str = self.next()

    if tok_str != ':':
        debug.warning("class syntax: %s@%s" % (cname, self.start_pos[0]))
        return None

    # because of 2 line class initializations
    scope = pr.Class(self.module, cname, superclasses, first_pos)
    if self.user_scope and scope != self.user_scope \
            and self.user_position > first_pos:
        self.user_scope = scope
    return scope
def parse_list_comp(token_iterator, token_list, start_pos, end_pos):
    """Parse a list comprehension body from *token_iterator*.

    Returns ``(ListComprehension, tok)`` on success or ``(None, tok)`` when
    the ``in`` keyword or a clause is missing.

    NOTE(review): references a free name ``self`` — presumably this was a
    closure/method in its original context; confirm before reusing standalone.
    """
    def parse_stmt_or_arr(token_iterator, added_breaks=()):
        # Parse one statement; a following comma promotes it to a tuple
        # wrapped in a synthetic Statement.
        stmt, tok = parse_stmt(token_iterator, added_breaks=added_breaks)
        if not stmt:
            return None, tok
        if tok == ",":
            arr, tok = parse_array(token_iterator, Array.TUPLE,
                                   stmt.start_pos, stmt,
                                   added_breaks=added_breaks)
            used_vars = []
            for stmt in arr:
                used_vars += stmt.used_vars
            start_pos = arr.start_pos[0], arr.start_pos[1] - 1
            stmt = Statement(self._sub_module, [], used_vars, [],
                             start_pos, arr.end_pos)
            arr.parent = stmt
            stmt.token_list = stmt._commands = [arr]
        else:
            # Re-parent the names so lookups resolve within the statement.
            for v in stmt.used_vars:
                v.parent = stmt
        return stmt, tok

    st = Statement(self._sub_module, [], [], token_list, start_pos, end_pos)

    middle, tok = parse_stmt_or_arr(token_iterator, added_breaks=["in"])
    if tok != "in" or middle is None:
        debug.warning("list comprehension middle @%s" % str(start_pos))
        return None, tok

    in_clause, tok = parse_stmt_or_arr(token_iterator)
    if in_clause is None:
        debug.warning("list comprehension in @%s" % str(start_pos))
        return None, tok

    return ListComprehension(st, middle, in_clause, self), tok
def execute(self, obj, params=(), evaluate_generator=False):
    """Execute *obj* with *params* and return the resulting types.

    Dispatches on what *obj* is: stdlib special-cases first, then generator
    methods, compiled objects, classes, functions and finally anything with
    an ``__call__`` subscope.
    """
    if obj.isinstance(er.Function):
        obj = obj.get_decorated_func()

    debug.dbg('execute: %s %s', obj, params)
    try:
        # super(), namedtuple() etc. are hard-coded in Jedi.
        return stdlib.execute(self, obj, params)
    except stdlib.NotInStdLib:
        pass

    if isinstance(obj, iterable.GeneratorMethod):
        return obj.execute()
    elif obj.isinstance(compiled.CompiledObject):
        if obj.is_executable_class():
            return [er.Instance(self, obj, params)]
        else:
            return list(obj.execute_function(self, params))
    elif obj.isinstance(er.Class):
        # There maybe executions of executions.
        return [er.Instance(self, obj, params)]
    else:
        stmts = []
        if obj.isinstance(er.Function):
            stmts = er.FunctionExecution(self, obj, params) \
                .get_return_types(evaluate_generator)
        else:
            if hasattr(obj, 'execute_subscope_by_name'):
                try:
                    stmts = obj.execute_subscope_by_name('__call__', params)
                except KeyError:
                    debug.warning("no __call__ func available %s", obj)
            else:
                debug.warning("no execution possible %s", obj)

        debug.dbg('execute result: %s in %s', stmts, obj)
        return imports.strip_imports(self, stmts)
def _parse_class(self):
    """
    The parser for a text class. Process the tokens, which follow a
    class definition.

    :return: Return a Scope representation of the tokens.
    :rtype: Class
    """
    first_pos = self._gen.current.start_pos

    name_token = next(self._gen)
    if name_token.type != tokenize.NAME:
        debug.warning("class: syntax err, token is not a name@%s (%s: %s)",
                      name_token.start_pos[0],
                      tokenize.tok_name[name_token.type], name_token.string)
        return None

    cname = pr.Name(self.module, [(name_token.string, name_token.start_pos)],
                    name_token.start_pos, name_token.end_pos)

    superclasses = []
    tok = next(self._gen)
    if tok.string == '(':
        superclasses = self._parse_parentheses()
        tok = next(self._gen)

    if tok.string != ':':
        debug.warning("class syntax: %s@%s", cname, tok.start_pos[0])
        return None

    return pr.Class(self.module, cname, superclasses, first_pos)
def _star_star_dict(evaluator, array, expression_list, func):
    """Turn a ``**kwargs`` argument into a ``{name: (key, value_stmt)}`` dict.

    Non-dict arrays produce a type-error analysis entry instead; compiled
    ``dict`` instances are currently ignored.
    """
    dct = {}
    from jedi.evaluate.representation import Instance
    if isinstance(array, Instance) and array.name.get_code() == 'dict':
        # For now ignore this case. In the future add proper iterators and just
        # make one call without crazy isinstance checks.
        return {}

    if isinstance(array, iterable.Array) and array.type == pr.Array.DICT:
        for key_stmt, value_stmt in array.items():
            # first index, is the key if syntactically correct call
            call = key_stmt.expression_list()[0]
            if isinstance(call, pr.Name):
                key = call
            elif isinstance(call, pr.Call):
                key = call.name
            else:
                debug.warning('Ignored complicated **kwargs stmt %s' % call)
                continue  # We ignore complicated statements here, for now.

            # If the string is a duplicate, we don't care it's illegal Python
            # anyway.
            dct[str(key)] = key, value_stmt
    else:
        if expression_list:
            m = "TypeError: %s argument after ** must be a mapping, not %s" \
                % (func.name.get_code(), array)
            analysis.add(evaluator, 'type-error-star-star',
                         expression_list[0], message=m)
    return dct
def _apply_decorators(context, node):
    """Process *node*'s decorators and return the resulting context set.

    Applies each decorator from the innermost outwards; falls back to the
    undecorated context when a decorator cannot be resolved or executed.
    """
    if node.type == 'classdef':
        decoratee_context = ClassContext(
            context.evaluator,
            parent_context=context,
            tree_node=node
        )
    else:
        decoratee_context = FunctionContext.from_context(context, node)

    undecorated = current = ContextSet(decoratee_context)
    for decorator in reversed(node.get_decorators()):
        debug.dbg('decorator: %s %s', decorator, current)
        decorator_contexts = context.eval_node(decorator.children[1])
        trailer_nodes = decorator.children[2:-1]
        if trailer_nodes:
            # The decorator has arguments: build a trailer node and
            # evaluate the call on the decorator expression.
            trailer = tree.PythonNode('trailer', trailer_nodes)
            trailer.parent = decorator
            decorator_contexts = eval_trailer(context, decorator_contexts,
                                              trailer)
        if not len(decorator_contexts):
            debug.warning('decorator not found: %s on %s', decorator, node)
            return undecorated

        current = decorator_contexts.execute(
            arguments.ValuesArguments([current]))
        if not len(current):
            debug.warning('not possible to resolve wrappers found %s', node)
            return undecorated

    debug.dbg('decorator end %s', current)
    return current
def execute(self, arguments):
    """
    In contrast to py__call__ this function is always available.

    `hasattr(x, py__call__)` can also be checked to see if a context is
    executable.
    """
    if self.evaluator.is_analysis:
        arguments.eval_all()

    debug.dbg('execute: %s %s', self, arguments)
    from jedi.evaluate import stdlib
    try:
        # Some stdlib functions like super(), namedtuple(), etc. have been
        # hard-coded in Jedi to support them.
        return stdlib.execute(self.evaluator, self, arguments)
    except stdlib.NotInStdLib:
        pass

    try:
        func = self.py__call__
    except AttributeError:
        debug.warning("no execution possible %s", self)
        return NO_CONTEXTS
    else:
        context_set = func(arguments)
        debug.dbg('execute result: %s in %s', context_set, self)
        return context_set
    # Bug fix: removed the unreachable trailing
    # `return self.evaluator.execute(self, arguments)` — every path above
    # already returns, so it was dead code.
def load_module(evaluator, path=None, name=None, sys_path=None):
    """Import the module given by *path* or dotted *name* and wrap it.

    Temporarily swaps ``sys.path`` for *sys_path* during the import.
    Returns ``None`` when the module cannot be imported.
    """
    if sys_path is None:
        sys_path = list(evaluator.get_sys_path())

    dotted_path = name if path is None \
        else dotted_from_fs_path(path, sys_path=sys_path)

    original_sys_path = sys.path
    sys.path = sys_path
    try:
        __import__(dotted_path)
    except ImportError:
        # If a module is "corrupt" or not really a Python module or whatever.
        debug.warning('Module %s not importable in path %s.', dotted_path, path)
        return None
    except Exception:
        # Since __import__ pretty much makes code execution possible, just
        # catch any error here and print it.
        import traceback
        print_to_stderr("Cannot import:\n%s" % traceback.format_exc())
        return None
    finally:
        sys.path = original_sys_path

    # Just access the cache after import, because of #59 as well as the very
    # complicated import structure of Python.
    module = sys.modules[dotted_path]
    return create_access_path(evaluator, module)
def _split_comment_param_declaration(decl_text):
    """
    Split decl_text on commas, but group generic expressions
    together.

    For example, given "foo, Bar[baz, biz]" we return
    ['foo', 'Bar[baz, biz]'].
    """
    try:
        node = parse(decl_text, error_recovery=False).children[0]
    except ParserSyntaxError:
        debug.warning('Comment annotation is not valid Python: %s' % decl_text)
        return []

    if node.type == 'name':
        return [node.get_code().strip()]

    try:
        children = node.children
    except AttributeError:
        # Leaf node without children: nothing to split.
        return []
    return [child.get_code().strip()
            for child in children
            if child.type in ('name', 'atom_expr', 'power')]
def load_module(evaluator, path=None, name=None):
    """Import the module given by *path* or dotted *name* and wrap it.

    Temporarily swaps ``sys.path`` for the evaluator's sys path during the
    import. Returns ``None`` for unimportable modules and for the known
    PyQt/PySide double-wrap RuntimeError.
    """
    sys_path = evaluator.sys_path
    dotted_path = name if path is None \
        else dotted_from_fs_path(path, sys_path=sys_path)
    if dotted_path is None:
        # No dotted path could be derived; import the remainder of the path
        # relative to its first component.
        p, _, dotted_path = path.partition(os.path.sep)
        sys_path.insert(0, p)

    original_sys_path = sys.path
    sys.path = sys_path
    try:
        __import__(dotted_path)
    except RuntimeError:
        if 'PySide' in dotted_path or 'PyQt' in dotted_path:
            # RuntimeError: the PyQt4.QtCore and PyQt5.QtCore modules both wrap
            # the QObject class.
            # See https://github.com/davidhalter/jedi/pull/483
            return None
        raise
    except ImportError:
        # If a module is "corrupt" or not really a Python module or whatever.
        debug.warning('Module %s not importable in path %s.', dotted_path, path)
        return None
    finally:
        sys.path = original_sys_path

    # Just access the cache after import, because of #59 as well as the very
    # complicated import structure of Python.
    module = sys.modules[dotted_path]
    return create(evaluator, module)
def push_stmt(self, stmt):
    """Push *stmt* onto the recursion stack.

    Returns ``True`` (after popping again) when this statement would
    recurse, ``False`` otherwise.
    """
    self.current = _RecursionNode(stmt, self.current)
    check = self._check_recursion()
    if not check:
        return False
    debug.warning("catched stmt recursion: %s against %s @%s",
                  stmt, check.stmt, stmt.start_pos)
    self.pop_stmt()
    return True
def get_set_vars(self):
    """Return the parent scope's set vars plus this function's param names."""
    names = super(Function, self).get_set_vars()
    for param in self.params:
        try:
            names.append(param.get_name())
        except IndexError:
            # A param with several names cannot produce a single name.
            debug.warning("multiple names in param %s" % names)
    return names
def py__getitem__(self, index):
    """Simulate ``self[index]`` via the instance's ``__getitem__`` slot."""
    try:
        names = self.get_function_slot_names('__getitem__')
    except KeyError:
        debug.warning('No __getitem__, cannot access the array.')
        return NO_CONTEXTS
    index_obj = compiled.create(self.evaluator, index)
    return self.execute_function_slots(names, index_obj)
def push_stmt(self, stmt):
    """Push *stmt* onto the recursion stack.

    Returns ``True`` (after popping again) when this statement would
    recurse, ``False`` otherwise.
    """
    self.current = RecursionNode(stmt, self.current)
    check = self._check_recursion()
    if not check:
        return False
    debug.warning('catched stmt recursion: %s against %s @%s'
                  % (stmt, check.stmt, stmt.start_pos))
    self.pop_stmt()
    return True
def add(evaluator, name, jedi_obj, message=None, typ=Error, payload=None):
    """Record an analysis issue for *jedi_obj* unless a try/except catches it."""
    exception = CODES[name][1]
    if _check_for_exception_catch(evaluator, jedi_obj, exception, payload):
        return
    module_path = jedi_obj.get_parent_until().path
    issue = typ(name, module_path, jedi_obj.start_pos, message)
    debug.warning(str(issue))
    evaluator.analysis.append(issue)
def _add_error(context, name, message=None):
    """Register an import-error for *name*.

    *name* should be a tree.Name node; a plain string still produces a
    warning but no positioned analysis entry.
    """
    if message is None:
        if isinstance(name, tree.Name):
            name_str = str(name.value)
        else:
            name_str = name
        message = 'No module named ' + name_str
    if not hasattr(name, 'parent'):
        # Without a tree node there is no position to attach the error to.
        debug.warning('ImportError without origin: ' + message)
    else:
        analysis.add(context, 'import-error', name, message)
def add(node_context, error_name, node, message=None, typ=Error, payload=None):
    """Record an analysis issue for *node* unless a try/except catches it."""
    exception = CODES[error_name][1]
    if _check_for_exception_catch(node_context, node, exception, payload):
        return
    module_path = node.get_root_node().path
    issue = typ(error_name, module_path, node.start_pos, message)
    debug.warning(str(issue), format=False)
    node_context.evaluator.analysis.append(issue)
def py__getitem__(self, index):
    """Simulate ``self[index]`` via the instance's ``__getitem__`` subscope."""
    try:
        method = self.get_subscope_by_name('__getitem__')
    except KeyError:
        debug.warning('No __getitem__, cannot access the array.')
        return set()
    index_obj = compiled.create(self._evaluator, index)
    return self._evaluator.execute_evaluated(method, index_obj)
def builtins_getattr(evaluator, objects, names, defaults=None):
    """Simulate ``getattr``: look up the first string name on each object."""
    # follow the first param
    for obj in objects:
        for name in names:
            if not is_string(name):
                debug.warning('getattr called without str')
                continue
            return obj.py__getattribute__(
                force_unicode(name.get_safe_value()))
    return NO_CONTEXTS
def _py__stop_iteration_returns(generators):
    """Union the ``StopIteration`` return values of every generator given."""
    results = ContextSet()
    for generator in generators:
        try:
            method = generator.py__stop_iteration_returns
        except AttributeError:
            debug.warning('%s is not actually a generator', generator)
            continue
        results |= method()
    return results
def builtins_getattr(evaluator, objects, names, defaults=None):
    """Simulate ``getattr``: follow the first string name on each object."""
    # follow the first param
    for obj in objects:
        for name in names:
            if not precedence.is_string(name):
                debug.warning('getattr called without str')
                continue
            return obj.py__getattribute__(name.obj)
    return set()
def eval_argument_clinic(self, parameters):
    """Uses a list with argument clinic information (see PEP 436)."""
    unpacked = self.unpack()
    for index, (name, optional, allow_kwargs) in enumerate(parameters):
        key, argument = next(unpacked, (None, None))
        if key is not None:
            raise NotImplementedError
        if argument is None:
            if not optional:
                debug.warning('TypeError: %s expected at least %s arguments, got %s',
                              name, len(parameters), index)
                raise ValueError
            values = NO_CONTEXTS
        else:
            values = argument.infer()
        if not values and not optional:
            # For the stdlib we always want values. If we don't get them,
            # that's ok, maybe something is too hard to resolve, however,
            # we will not proceed with the evaluation of that function.
            debug.warning('argument_clinic "%s" not resolvable.', name)
            raise ValueError
        yield values
def _get_paths_from_buildout_script(evaluator, buildout_script_path):
    """Parse a buildout-generated script and yield its sys.path additions."""
    try:
        module_node = evaluator.parse(path=buildout_script_path,
                                      cache=True,
                                      cache_path=settings.cache_directory)
    except IOError:
        debug.warning('Error trying to read buildout_script: %s',
                      buildout_script_path)
        return

    from jedi.evaluate.context import ModuleContext
    module_context = ModuleContext(
        evaluator, module_node, buildout_script_path,
        code_lines=get_cached_code_lines(evaluator.grammar,
                                         buildout_script_path),
    )
    for path in check_sys_path_modifications(module_context):
        yield path
def wrapper(context, *args, **kwargs):
    """Guard against runaway inference on a single tree node.

    Counts how often *n* has been inferred; beyond the limit the call is cut
    off with ``NO_VALUES``. ``func`` is a free name — presumably the function
    wrapped by the enclosing decorator (not visible here; confirm).
    """
    n = context.tree_node
    inference_state = context.inference_state
    try:
        inference_state.inferred_element_counts[n] += 1
        maximum = 300
        if context.parent_context is None \
                and context.get_value() is inference_state.builtins_module:
            # Builtins should have a more generous inference limit.
            # It is important that builtins can be executed, otherwise some
            # functions that depend on certain builtins features would be
            # broken, see e.g. GH #1432
            maximum *= 100
        if inference_state.inferred_element_counts[n] > maximum:
            debug.warning('In value %s there were too many inferences.', n)
            return NO_VALUES
    except KeyError:
        # First visit of this node: initialize its counter.
        inference_state.inferred_element_counts[n] = 1
    return func(context, *args, **kwargs)
def py__iter__(self):
    """Yield lazy contexts by simulating the iterator protocol.

    Calls ``__iter__`` on this instance, then drives each returned
    generator via ``__next__``/``next`` (or delegates for internal
    iterables).
    """
    iter_slot_names = self.get_function_slot_names('__iter__')
    if not iter_slot_names:
        debug.warning('No __iter__ on %s.' % self)
        return

    for generator in self.execute_function_slots(iter_slot_names):
        if not isinstance(generator, AbstractInstanceContext):
            # Internal iterable objects know how to iterate themselves.
            for lazy_context in generator.py__iter__():
                yield lazy_context
            continue

        # `__next__` logic.
        name = '__next__' if is_py3 else 'next'
        next_slot_names = generator.get_function_slot_names(name)
        if next_slot_names:
            yield LazyKnownContexts(
                generator.execute_function_slots(next_slot_names))
        else:
            debug.warning('Instance has no __next__ function in %s.',
                          generator)
def _get_paths_from_buildout_script(inference_state, buildout_script_path):
    """Parse a buildout-generated script and yield its sys.path additions."""
    script_path_str = str(buildout_script_path)
    file_io = FileIO(script_path_str)
    try:
        module_node = inference_state.parse(
            file_io=file_io, cache=True,
            cache_path=settings.cache_directory)
    except IOError:
        debug.warning('Error trying to read buildout_script: %s',
                      buildout_script_path)
        return

    from jedi.inference.value import ModuleValue
    module_value = ModuleValue(
        inference_state, module_node,
        file_io=file_io,
        string_names=None,
        code_lines=get_cached_code_lines(inference_state.grammar,
                                         script_path_str),
    )
    yield from check_sys_path_modifications(module_value.as_context())
def eval_trailer(self, types, trailer):
    """Evaluate one trailer (attribute access, call or subscript) for *types*."""
    trailer_op, node = trailer.children[:2]
    if node == ')':
        # An empty call: `arglist` is optional.
        node = ()

    result = []
    for typ in types:
        debug.dbg('eval_trailer: %s in scope %s', trailer, typ)
        if trailer_op == '.':
            result += self.find_types(typ, node)
        elif trailer_op == '(':
            result += self.execute(typ, node, trailer)
        elif trailer_op == '[':
            getter = getattr(typ, 'get_index_types', None)
            if getter is None:
                debug.warning(
                    "TypeError: '%s' object is not subscriptable" % typ)
            else:
                result += getter(self, node)
    return result
def eval_argument_clinic(self, arguments):
    """Uses a list with argument clinic information (see PEP 436)."""
    unpacked = self.unpack()
    for index, (name, optional, allow_kwargs) in enumerate(arguments):
        key, va_values = next(unpacked, (None, []))
        if key is not None:
            raise NotImplementedError
        if not va_values and not optional:
            debug.warning('TypeError: %s expected at least %s arguments, got %s',
                          name, len(arguments), index)
            raise ValueError

        values = []
        for element in va_values:
            values.extend(self._evaluator.eval_element(element))
        if not values and not optional:
            # For the stdlib we always want values. If we don't get them,
            # that's ok, maybe something is too hard to resolve, however,
            # we will not proceed with the evaluation of that function.
            debug.warning('argument_clinic "%s" not resolvable.', name)
            raise ValueError
        yield values
def __init__(self, parent_context, tree_name, var_name, unpacked_args):
    """Collect ``TypeVar(...)`` arguments.

    Positional arguments become type constraints; the keyword arguments
    ``bound``, ``covariant`` and ``contravariant`` are stored individually.
    Unknown keywords are only warned about.
    """
    super().__init__(parent_context, tree_name)
    self._var_name = var_name

    self._constraints_lazy_values = []
    self._bound_lazy_value = None
    self._covariant_lazy_value = None
    self._contravariant_lazy_value = None
    for key, lazy_value in unpacked_args:
        if key is None:
            self._constraints_lazy_values.append(lazy_value)
        elif key == 'bound':
            self._bound_lazy_value = lazy_value
        elif key == 'covariant':
            self._covariant_lazy_value = lazy_value
        elif key == 'contravariant':
            # Bug fix: previously assigned to a misspelled attribute
            # (`_contra_variant_lazy_value`), so the initialized
            # `_contravariant_lazy_value` was never updated.
            self._contravariant_lazy_value = lazy_value
        else:
            debug.warning('Invalid TypeVar param name %s', key)
def _find_string_name(self, lazy_value): if lazy_value is None: return None value_set = lazy_value.infer() if not value_set: return None if len(value_set) > 1: debug.warning('Found multiple values for a type variable: %s', value_set) name_value = next(iter(value_set)) try: method = name_value.get_safe_value except AttributeError: return None else: safe_value = method(default=None) if isinstance(safe_value, str): return safe_value return None
def _apply_decorators(context, node):
    """Process *node*'s decorators and return the resulting context set.

    Applies each decorator from the innermost outwards; falls back to the
    undecorated context when a decorator cannot be resolved or executed.
    """
    if node.type == 'classdef':
        decoratee_context = ClassContext(
            context.evaluator,
            parent_context=context,
            tree_node=node
        )
    else:
        decoratee_context = FunctionContext.from_context(context, node)
    initial = values = ContextSet([decoratee_context])
    for dec in reversed(node.get_decorators()):
        debug.dbg('decorator: %s %s', dec, values, color="MAGENTA")
        with debug.increase_indent_cm():
            dec_values = context.eval_node(dec.children[1])
            trailer_nodes = dec.children[2:-1]
            if trailer_nodes:
                # Create a trailer and evaluate it.
                trailer = tree.PythonNode('trailer', trailer_nodes)
                trailer.parent = dec
                dec_values = eval_trailer(context, dec_values, trailer)
            if not len(dec_values):
                code = dec.get_code(include_prefix=False)
                # For the short future, we don't want to hear about the runtime
                # decorator in typing that was intentionally omitted. This is not
                # "correct", but helps with debugging.
                if code != '@runtime\n':
                    debug.warning('decorator not found: %s on %s', dec, node)
                return initial
            values = dec_values.execute(arguments.ValuesArguments([values]))
            if not len(values):
                debug.warning('not possible to resolve wrappers found %s', node)
                return initial
        debug.dbg('decorator end %s', values, color="MAGENTA")
    return values
def _follow_path(self, path, typ, scope): """ Uses a generator and tries to complete the path, e.g.:: foo.bar.baz `_follow_path` is only responsible for completing `.bar.baz`, the rest is done in the `follow_call` function. """ # current is either an Array or a Scope. try: current = next(path) except StopIteration: return None debug.dbg('_follow_path: %s in scope %s', current, typ) result = [] if isinstance(current, pr.Array): # This must be an execution, either () or []. if current.type == pr.Array.LIST: if hasattr(typ, 'get_index_types'): slc = iterable.create_indexes_or_slices(self, current) result = typ.get_index_types(slc) elif current.type not in [pr.Array.DICT]: # Scope must be a class or func - make an instance or execution. result = self.execute(typ, current) else: # Curly braces are not allowed, because they make no sense. debug.warning('strange function call with {} %s %s', current, typ) else: # The function must not be decorated with something else. if typ.isinstance(er.Function): typ = typ.get_magic_function_scope() else: # This is the typical lookup while chaining things. if filter_private_variable(typ, scope, current): return [] types = self.find_types(typ, current) result = imports.strip_imports(self, types) return self.follow_path(path, result, scope)
def execute(self, obj, arguments):
    """Execute *obj* with *arguments*, consulting hard-coded stdlib handlers first.

    Returns the set of resulting types, or an empty set when *obj* is not
    callable (no ``py__call__``).
    """
    if self.is_analysis:
        arguments.eval_all()

    debug.dbg('execute: %s %s', obj, arguments)
    try:
        # Some stdlib functions like super(), namedtuple(), etc. have been
        # hard-coded in Jedi to support them.
        return stdlib.execute(self, obj, arguments)
    except stdlib.NotInStdLib:
        pass

    func = getattr(obj, 'py__call__', None)
    if func is None:
        debug.warning("no execution possible %s", obj)
        return set()
    types = func(arguments)
    debug.dbg('execute result: %s in %s', types, obj)
    return types
def py__iter__(self):
    """Yield the types produced by iterating over this instance.

    Calls ``__iter__`` and drives each returned generator via
    ``__next__``/``next`` (or delegates for internal iterables).
    """
    try:
        method = self.get_subscope_by_name('__iter__')
    except KeyError:
        debug.warning('No __iter__ on %s.' % self)
        return

    for generator in self._evaluator.execute(method):
        if not isinstance(generator, Instance):
            # Internal iterable objects know how to iterate themselves.
            for typ in generator.py__iter__():
                yield typ
            continue

        # `__next__` logic.
        name = '__next__' if is_py3 else 'next'
        try:
            yield generator.execute_subscope_by_name(name)
        except KeyError:
            debug.warning('Instance has no __next__ function in %s.',
                          generator)
def eval_annotation(context, annotation):
    """
    Evaluates an annotation node. This means that it evaluates the part of
    `int` here:

        foo: int = 3

    Also checks for forward references (strings)
    """
    context_set = context.eval_node(annotation)
    if len(context_set) != 1:
        debug.warning("Eval'ed typing index %s should lead to 1 object, "
                      " not %s" % (annotation, context_set))
        return context_set

    evaled_context = list(context_set)[0]
    if not is_string(evaled_context):
        return context_set

    # A string annotation: parse the forward reference and evaluate it.
    result = _get_forward_reference_node(context,
                                         evaled_context.get_safe_value())
    if result is None:
        return context_set
    return context.eval_node(result)
def infer_annotation(context, annotation):
    """
    Infers an annotation node. This means that it infers the part of
    `int` here:

        foo: int = 3

    Also checks for forward references (strings)
    """
    value_set = context.infer_node(annotation)
    if len(value_set) != 1:
        debug.warning("Inferred typing index %s should lead to 1 object, "
                      " not %s" % (annotation, value_set))
        return value_set

    inferred_value = list(value_set)[0]
    if not is_string(inferred_value):
        return value_set

    # A string annotation: parse the forward reference and infer it.
    forward_node = _get_forward_reference_node(
        context, inferred_value.get_safe_value())
    if forward_node is None:
        return value_set
    return context.infer_node(forward_node)
def builtins_getattr(evaluator, obj, params): stmts = [] # follow the first param objects = _follow_param(evaluator, params, 0) names = _follow_param(evaluator, params, 1) for obj in objects: if not isinstance( obj, (er.Instance, er.Class, pr.Module, compiled.CompiledObject)): debug.warning('getattr called without instance') continue for name in names: s = unicode, str if isinstance(name, compiled.CompiledObject) and isinstance( name.obj, s): stmts += evaluator.follow_path(iter([name.obj]), [obj], obj) else: debug.warning('getattr called without str') continue return stmts
def parse_list_comp(token_iterator, token_list, start_pos, end_pos):
    """Parse a list comprehension body from *token_iterator*.

    Returns ``(ListComprehension, tok)`` on success or ``(None, tok)`` when
    the ``in`` keyword or a clause is missing.

    NOTE(review): references a free name ``self`` — presumably this was a
    closure/method in its original context; confirm before reusing standalone.
    """
    def parse_stmt_or_arr(token_iterator, added_breaks=()):
        # Parse one statement; a following comma promotes it to a tuple
        # wrapped in a synthetic Statement.
        stmt, tok = parse_stmt(token_iterator, added_breaks=added_breaks)
        if not stmt:
            return None, tok
        if tok == ',':
            arr, tok = parse_array(token_iterator, Array.TUPLE,
                                   stmt.start_pos, stmt,
                                   added_breaks=added_breaks)
            used_vars = []
            for stmt in arr:
                used_vars += stmt.used_vars
            start_pos = arr.start_pos[0], arr.start_pos[1] - 1
            stmt = Statement(self._sub_module, [], used_vars, [],
                             start_pos, arr.end_pos)
            arr.parent = stmt
            stmt.token_list = stmt._commands = [arr]
        else:
            # Re-parent the names so lookups resolve within the statement.
            for v in stmt.used_vars:
                v.parent = stmt
        return stmt, tok

    st = Statement(self._sub_module, [], [], token_list, start_pos, end_pos)

    middle, tok = parse_stmt_or_arr(token_iterator, added_breaks=['in'])
    if tok != 'in' or middle is None:
        debug.warning('list comprehension middle @%s' % str(start_pos))
        return None, tok

    in_clause, tok = parse_stmt_or_arr(token_iterator)
    if in_clause is None:
        debug.warning('list comprehension in @%s' % str(start_pos))
        return None, tok

    return ListComprehension(st, middle, in_clause, self), tok
def get_iterator_types(inputs):
    """
    Return the types produced by iterating any of ``inputs``.

    Handles Jedi-internal arrays, generators, ``ArrayInstance`` objects and
    arbitrary instances defining ``__iter__`` (plus ``next``/``__next__``).
    """
    iterators = []
    for inp in inputs:
        if isinstance(inp, (er.Generator, er.Array, dynamic.ArrayInstance)):
            # Already something Jedi can iterate over directly.
            iterators.append(inp)
            continue
        if not hasattr(inp, 'execute_subscope_by_name'):
            debug.warning('iterator/for loop input wrong', inp)
            continue
        try:
            iterators += inp.execute_subscope_by_name('__iter__')
        except KeyError:
            debug.warning('iterators: No __iter__ method found.')

    result = []
    for gen in iterators:
        if isinstance(gen, er.Array):
            # Array is a little bit special, since this is an internal
            # array, but there's also the list builtin, which is
            # another thing.
            result += gen.get_index_types()
        elif isinstance(gen, er.Instance):
            # __iter__ returned an instance; advance it once.
            name = '__next__' if is_py3k else 'next'
            try:
                result += gen.execute_subscope_by_name(name)
            except KeyError:
                debug.warning('Instance has no __next__ function', gen)
        else:
            # It is a generator.
            result += gen.iter_content()
    return result
def _iterate_argument_clinic(evaluator, arguments, parameters):
    """
    Match ``arguments`` against argument-clinic ``parameters`` (see PEP 436)
    and yield one ContextSet per parameter.

    :raises ParamIssue: when a keyword argument is given (unsupported), a
        required argument is missing, or an argument cannot be resolved.
    """
    iterator = PushBackIterator(arguments.unpack())
    for i, (name, optional, allow_kwargs, stars) in enumerate(parameters):
        if stars == 1:
            # ``*args``: consume all remaining positional arguments.
            lazy_contexts = []
            for key, argument in iterator:
                if key is not None:
                    # A keyword argument ends the positional run; put it
                    # back so later parameters can see it.
                    iterator.push_back((key, argument))
                    break
                lazy_contexts.append(argument)
            yield ContextSet([iterable.FakeSequence(evaluator, u'tuple',
                                                    lazy_contexts)])
            # (A stray no-op ``lazy_contexts`` expression statement that
            # followed the yield in the original has been removed.)
            continue
        elif stars == 2:
            raise NotImplementedError()

        key, argument = next(iterator, (None, None))
        if key is not None:
            debug.warning('Keyword arguments in argument clinic are currently not supported.')
            raise ParamIssue
        if argument is None and not optional:
            debug.warning('TypeError: %s expected at least %s arguments, got %s',
                          name, len(parameters), i)
            raise ParamIssue

        context_set = NO_CONTEXTS if argument is None else argument.infer()
        if not context_set and not optional:
            # For the stdlib we always want values. If we don't get them,
            # that's ok, maybe something is too hard to resolve, however,
            # we will not proceed with the evaluation of that function.
            debug.warning('argument_clinic "%s" not resolvable.', name)
            raise ParamIssue
        yield context_set
def get_decorated_func(self):
    """
    Return the function that should be executed in the end, i.e.
    ``self.base_func`` with all of its decorators applied (innermost first).

    Returns ``self`` unchanged when there are no decorators, when the
    function is already decorated, or when a decorator/wrapper cannot be
    resolved.
    """
    f = self.base_func
    decorators = self.base_func.get_decorators()
    if not decorators or self.is_decorated:
        return self

    # ``self.is_decorated`` is known to be False past the early return, so
    # the original's extra ``if not self.is_decorated:`` guard around this
    # loop was redundant and has been removed.
    for dec in reversed(decorators):
        debug.dbg('decorator: %s %s', dec, f)
        dec_results = self._evaluator.eval_element(dec.children[1])
        trailer = dec.children[2:-1]
        if trailer:
            # Create a trailer and evaluate it.
            trailer = pr.Node('trailer', trailer)
            dec_results = self._evaluator.eval_trailer(dec_results, trailer)

        if not len(dec_results):
            debug.warning('decorator not found: %s on %s',
                          dec, self.base_func)
            return self
        decorator = dec_results.pop()
        if dec_results:
            debug.warning('multiple decorators found %s %s',
                          self.base_func, dec_results)

        # Create param array.
        if isinstance(f, Function):
            old_func = f  # TODO this is just hacky. change.
        else:
            old_func = Function(self._evaluator, f, is_decorated=True)

        wrappers = self._evaluator.execute_evaluated(decorator, old_func)
        if not len(wrappers):
            debug.warning('no wrappers found %s', self.base_func)
            return self
        if len(wrappers) > 1:
            # TODO resolve issue with multiple wrappers -> multiple types
            debug.warning('multiple wrappers found %s %s',
                          self.base_func, wrappers)
        f = wrappers[0]

    if isinstance(f, (Instance, Function)):
        f.decorates = self
    debug.dbg('decorator end %s', f)
    return f
def follow(self, is_goto=False):
    """
    Return the scopes an import statement resolves to.

    :param is_goto: when True, return name definitions (goto targets)
        instead of the fully followed scopes.
    """
    # Recursion guard: push_stmt returns True when this import statement is
    # already being followed (an import cycle); bail out in that case.
    if evaluate.follow_statement.push_stmt(self.import_stmt):
        # check recursion
        return []

    if self.import_path:
        try:
            scope, rest = self._follow_file_system()
        except ModuleNotFound:
            debug.warning('Module not found: ' + str(self.import_stmt))
            # Keep push/pop balanced on the early exit.
            evaluate.follow_statement.pop_stmt()
            return []

        scopes = [scope]
        scopes += remove_star_imports(scope)

        # follow the rest of the import (not FS -> classes, functions)
        if len(rest) > 1 or rest and self.is_like_search:
            scopes = []
        elif rest:
            if is_goto:
                scopes = itertools.chain.from_iterable(
                    evaluate.find_name(s, rest[0], is_goto=True)
                    for s in scopes)
            else:
                scopes = itertools.chain.from_iterable(
                    evaluate.follow_path(iter(rest), s, s)
                    for s in scopes)
        scopes = list(scopes)

        if self.is_nested_import():
            scopes.append(self.get_nested_import(scope))
    else:
        # Empty import path: fall back to the global namespace.
        scopes = [ImportPath.GlobalNamespace]
    debug.dbg('after import', scopes)

    evaluate.follow_statement.pop_stmt()
    return scopes
def follow_path(path, scope, call_scope, position=None):
    """
    Complete the trailing part of a dotted path, e.g. the ``.bar.baz`` of
    ``foo.bar.baz`` (the leading part is handled by ``follow_call``).

    ``path`` yields elements that are either a ``pr.Array`` (a ``()``/``[]``
    execution) or a plain name.
    """
    try:
        current = next(path)
    except StopIteration:
        # Path exhausted — nothing left to follow.
        return None
    debug.dbg('follow %s in scope %s' % (current, scope))

    result = []
    if not isinstance(current, pr.Array):
        if scope.isinstance(er.Function):
            # The function must not be decorated with something else.
            scope = scope.get_magic_method_scope()
        elif filter_private_variable(scope, call_scope, current):
            # This is the typical lookup while chaining things.
            return []
        found = find_name(scope, current, position=position)
        result = imports.strip_imports(found)
    elif current.type == pr.Array.LIST:
        # ``[]`` — subscript access.
        if hasattr(scope, 'get_index_types'):
            result = scope.get_index_types(current)
    elif current.type not in [pr.Array.DICT]:
        # ``()`` — scope must be a class or func; make an
        # instance or execution.
        debug.dbg('exe', scope)
        result = er.Execution(scope, current).get_return_types()
    else:
        # Curly braces are not allowed, because they make no sense.
        debug.warning('strange function call with {}', current, scope)

    return follow_paths(path, set(result), call_scope, position=position)
def _apply_decorators(context, node):
    """
    Apply all decorators of ``node`` (a funcdef or classdef) and return the
    resulting values — this is where decorator processing happens.

    Falls back to the undecorated context whenever a decorator or its call
    result cannot be resolved.
    """
    if node.type == 'classdef':
        decoratee = ClassContext(context.evaluator,
                                 parent_context=context,
                                 classdef=node)
    else:
        decoratee = FunctionContext(context.evaluator,
                                    parent_context=context,
                                    funcdef=node)
    undecorated = current = ContextSet(decoratee)

    # Decorators apply bottom-up, hence the reversed iteration.
    for dec in reversed(node.get_decorators()):
        debug.dbg('decorator: %s %s', dec, current)
        dec_values = context.eval_node(dec.children[1])
        trailer_nodes = dec.children[2:-1]
        if trailer_nodes:
            # Create a trailer and evaluate it.
            trailer = tree.PythonNode('trailer', trailer_nodes)
            trailer.parent = dec
            dec_values = eval_trailer(context, dec_values, trailer)

        if not len(dec_values):
            debug.warning('decorator not found: %s on %s', dec, node)
            return undecorated

        current = dec_values.execute(arguments.ValuesArguments([current]))
        if not len(current):
            debug.warning('not possible to resolve wrappers found %s', node)
            return undecorated

    debug.dbg('decorator end %s', current)
    return current
def _fix_forward_reference(context, node):
    """
    Resolve a forward reference: when ``node`` evaluates to exactly one
    compiled string object, parse that string as an expression and return
    the parsed node; otherwise return ``node`` unchanged.
    """
    evaled_nodes = context.eval_node(node)
    if len(evaled_nodes) != 1:
        debug.warning("Eval'ed typing index %s should lead to 1 object, "
                      " not %s" % (node, evaled_nodes))
        return node

    evaled_node = next(iter(evaled_nodes))
    is_str = (isinstance(evaled_node, compiled.CompiledObject)
              and isinstance(evaled_node.obj, str))
    if not is_str:
        return node

    try:
        new_node = parse(_compatibility.unicode(evaled_node.obj),
                         start_symbol='eval_input',
                         error_recovery=False)
    except ParserSyntaxError:
        debug.warning('Annotation not parsed: %s' % evaled_node.obj)
        return node

    # Move the fresh tree behind the module so positions do not clash, and
    # give it the current context as parent.
    module = node.get_root_node()
    parser_utils.move(new_node, module.end_pos[0])
    new_node.parent = context.tree_node
    return new_node
def do_import(importer, import_parts, import_path, sys_path):
    """
    Extension import function.

    Calls each extension function found using :meth:`_find_extensions` and
    returns the result of the first successful call. See
    :meth:`Importer.do_import<evaluate.imports.Importer.do_import>` for
    parameter and result description.
    """
    result = NO_CONTEXTS
    for extension in _import_extensions:
        try:
            result = extension(importer, import_parts, import_path, sys_path)
        except Exception as e:
            # A broken extension must not break importing as a whole;
            # log it and move on to the next one.
            debug.warning("Failed to execute importer %s: %s",
                          extension, e, format=True)
            continue
        if result != NO_CONTEXTS:
            break
    return result
def _fix_forward_reference(context, node):
    """
    Resolve a forward reference: when ``node`` evaluates to exactly one
    string value, parse that string with the evaluator's grammar and return
    the parsed node; otherwise return ``node`` unchanged.
    """
    evaled_nodes = context.eval_node(node)
    if len(evaled_nodes) != 1:
        debug.warning("Eval'ed typing index %s should lead to 1 object, "
                      " not %s" % (node, evaled_nodes))
        return node

    evaled_node = next(iter(evaled_nodes))
    if not is_string(evaled_node):
        return node

    try:
        new_node = context.evaluator.grammar.parse(
            force_unicode(evaled_node.get_safe_value()),
            start_symbol='eval_input',
            error_recovery=False)
    except ParserSyntaxError:
        debug.warning('Annotation not parsed: %s' % evaled_node)
        return node

    # Move the fresh tree behind the module so positions do not clash, and
    # give it the current context as parent.
    module = node.get_root_node()
    parser_utils.move(new_node, module.end_pos[0])
    new_node.parent = context.tree_node
    return new_node
def _find_string_name(self, lazy_value):
    """
    Extract a plain string from ``lazy_value`` (e.g. the name argument of a
    type variable); return ``None`` when no single safe string is found.
    """
    if lazy_value is None:
        return None

    values = lazy_value.infer()
    if not values:
        return None
    if len(values) > 1:
        debug.warning('Found multiple values for a type variable: %s',
                      values)

    first = next(iter(values))
    try:
        get_safe = first.get_safe_value
    except AttributeError:
        # Not a literal-backed value; no string to extract.
        return None

    safe_value = get_safe(default=None)
    # On Python 2 environments the name may arrive as bytes; decode it.
    if self.inference_state.environment.version_info.major == 2 \
            and isinstance(safe_value, bytes):
        return force_unicode(safe_value)
    if isinstance(safe_value, (str, unicode)):
        return safe_value
    return None
def py__iter__(self):
    """
    Iterate over this instance by calling its ``__iter__`` slot and then
    resolving the returned iterator's ``next``/``__next__``.

    Yields lazy contexts; yields nothing (with a warning) when the object
    has no ``__iter__``.
    """
    iter_slot_names = self.get_function_slot_names(u'__iter__')
    if not iter_slot_names:
        debug.warning('No __iter__ on %s.' % self)
        return

    for generator in self.execute_function_slots(iter_slot_names):
        if isinstance(generator, AbstractInstanceContext):
            # `__next__` logic: pick the slot name for the environment's
            # Python major version.
            if self.evaluator.environment.version_info.major == 2:
                name = u'next'
            else:
                name = u'__next__'
            # NOTE: reuses the ``iter_slot_names`` variable for the
            # next-slot lookup result.
            iter_slot_names = generator.get_function_slot_names(name)
            if iter_slot_names:
                yield LazyKnownContexts(
                    generator.execute_function_slots(iter_slot_names))
            else:
                debug.warning('Instance has no __next__ function in %s.',
                              generator)
        else:
            # A Jedi-internal generator object: delegate iteration to it.
            for lazy_context in generator.py__iter__():
                yield lazy_context
def _fix_forward_reference(context, node):
    """
    Resolve a forward reference: when ``node`` evaluates to exactly one
    compiled string object, parse that string with a fresh parser and
    return the parsed node; otherwise return ``node`` unchanged.
    """
    evaled_nodes = context.eval_node(node)
    if len(evaled_nodes) != 1:
        debug.warning("Eval'ed typing index %s should lead to 1 object, "
                      " not %s" % (node, evaled_nodes))
        return node

    evaled_node = next(iter(evaled_nodes))
    if not (isinstance(evaled_node, compiled.CompiledObject)
            and isinstance(evaled_node.obj, str)):
        return node

    try:
        p = Parser(load_grammar(), _compatibility.unicode(evaled_node.obj),
                   start_symbol='eval_input')
        new_node = p.get_parsed_node()
    except ParseError:
        debug.warning('Annotation not parsed: %s' % evaled_node.obj)
        return node

    # Move the fresh tree behind the module so positions do not clash, and
    # give it the current context as parent.
    module = node.get_parent_until()
    new_node.move(module.end_pos[0])
    new_node.parent = context.tree_node
    return new_node
def execute(self, obj, params=(), evaluate_generator=False):
    """
    Execute ``obj`` (call it / instantiate it) with ``params`` and return
    the resulting types.

    Dispatch order: decorated functions are unwrapped first, then stdlib
    special cases, then generator methods, compiled objects, classes,
    functions, and finally arbitrary objects with a ``__call__`` subscope.
    """
    if obj.isinstance(er.Function):
        # Unwrap decorators before executing.
        obj = obj.get_decorated_func()

    debug.dbg('execute: %s %s', obj, params)
    try:
        # Some stdlib callables are hard-coded in Jedi.
        return stdlib.execute(self, obj, params)
    except stdlib.NotInStdLib:
        pass

    if isinstance(obj, iterable.GeneratorMethod):
        return obj.execute()
    elif obj.isinstance(compiled.CompiledObject):
        if obj.is_executable_class():
            # Calling a (compiled) class yields an instance.
            return [er.Instance(self, obj, params)]
        else:
            return list(obj.execute_function(self, params))
    elif obj.isinstance(er.Class):
        # There maybe executions of executions.
        return [er.Instance(self, obj, params)]
    else:
        stmts = []
        if obj.isinstance(er.Function):
            stmts = er.FunctionExecution(
                self, obj, params).get_return_types(evaluate_generator)
        else:
            if hasattr(obj, 'execute_subscope_by_name'):
                try:
                    stmts = obj.execute_subscope_by_name(
                        '__call__', params)
                except KeyError:
                    debug.warning("no __call__ func available %s", obj)
            else:
                debug.warning("no execution possible %s", obj)

        debug.dbg('execute result: %s in %s', stmts, obj)
        return imports.strip_imports(self, stmts)