def convert_contexts(contexts, only_stubs=False, prefer_stubs=False,
                     ignore_compiled=True):
    assert not (only_stubs and prefer_stubs)
    with debug.increase_indent_cm('convert contexts'):
        if only_stubs or prefer_stubs:
            return ContextSet.from_sets(
                to_stub(context)
                or (ContextSet({context}) if prefer_stubs else NO_CONTEXTS)
                for context in contexts
            )
        else:
            return ContextSet.from_sets(
                _stub_to_python_context_set(stub_context, ignore_compiled=ignore_compiled)
                or ContextSet({stub_context})
                for stub_context in contexts
            )
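# A minimal, self-contained sketch (hypothetical names, not jedi's API) of
# the ContextSet.from_sets pattern that convert_contexts and the functions
# below rely on: it unions an iterable of set-like objects into one set.
# Jedi's real ContextSet layers inference helpers on top of this behavior.
class ToyContextSet(frozenset):
    @classmethod
    def from_sets(cls, sets):
        result = set()
        for s in sets:
            result |= set(s)
        return cls(result)

print(sorted(ToyContextSet.from_sets([{1, 2}, {2, 3}])))  # [1, 2, 3]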
def get_return_values(self, check_yields=False):
    funcdef = self.tree_node
    if funcdef.type == 'lambdef':
        return self.eval_node(funcdef.children[-1])

    if check_yields:
        context_set = NO_CONTEXTS
        returns = get_yield_exprs(self.evaluator, funcdef)
    else:
        returns = funcdef.iter_return_stmts()
        context_set = docstrings.infer_return_types(self.function_context)
        context_set |= pep0484.infer_return_types(self.function_context)

    for r in returns:
        check = flow_analysis.reachability_check(self, funcdef, r)
        if check is flow_analysis.UNREACHABLE:
            debug.dbg('Return unreachable: %s', r)
        else:
            if check_yields:
                context_set |= ContextSet.from_sets(
                    lazy_context.infer()
                    for lazy_context in self._get_yield_lazy_context(r)
                )
            else:
                try:
                    children = r.children
                except AttributeError:
                    ctx = compiled.builtin_from_name(self.evaluator, u'None')
                    context_set |= ContextSet(ctx)
                else:
                    context_set |= self.eval_node(children[1])
        if check is flow_analysis.REACHABLE:
            debug.dbg('Return reachable: %s', r)
            break
    return context_set
def builtins_isinstance(evaluator, objects, types, arguments):
    bool_results = set()
    for o in objects:
        try:
            mro_func = o.py__class__().py__mro__
        except AttributeError:
            # This is temporary. Everything should have a class attribute in
            # Python?! Maybe we'll leave it here, because some numpy objects or
            # whatever might not.
            return ContextSet(compiled.create(evaluator, True),
                              compiled.create(evaluator, False))

        mro = mro_func()

        for cls_or_tup in types:
            if cls_or_tup.is_class():
                bool_results.add(cls_or_tup in mro)
            elif cls_or_tup.name.string_name == 'tuple' \
                    and cls_or_tup.get_root_context() == evaluator.BUILTINS:
                # Check for tuples.
                classes = ContextSet.from_sets(
                    lazy_context.infer()
                    for lazy_context in cls_or_tup.iterate()
                )
                bool_results.add(any(cls in mro for cls in classes))
            else:
                _, lazy_context = list(arguments.unpack())[1]
                if isinstance(lazy_context, LazyTreeContext):
                    node = lazy_context.data
                    message = 'TypeError: isinstance() arg 2 must be a ' \
                              'class, type, or tuple of classes and types, ' \
                              'not %s.' % cls_or_tup
                    analysis.add(lazy_context._context, 'type-error-isinstance',
                                 node, message)

    return ContextSet.from_iterable(
        compiled.create(evaluator, x) for x in bool_results
    )
def py__call__(self, arguments):
    names = self.get_function_slot_names(u'__call__')
    if not names:
        # Means the Instance is not callable.
        return super(AbstractInstanceContext, self).py__call__(arguments)

    return ContextSet.from_sets(name.infer().execute(arguments) for name in names)
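# A plain-Python illustration (not jedi code) of the slot semantics that
# py__call__ emulates: calling an instance dispatches to the __call__
# defined on its class.
class Greeter:
    def __call__(self, name):
        return 'hello ' + name

print(Greeter()('world'))  # hello world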
def stub_to_actual_context_set(stub_context, ignore_compiled=False):
    stub_module = stub_context.get_root_context()
    if not stub_module.is_stub():
        return ContextSet([stub_context])

    was_instance = stub_context.is_instance()
    if was_instance:
        stub_context = stub_context.py__class__()

    qualified_names = stub_context.get_qualified_names()
    if qualified_names is None:
        return NO_CONTEXTS

    was_bound_method = stub_context.is_bound_method()
    if was_bound_method:
        # Infer the object first. We can infer the method later.
        method_name = qualified_names[-1]
        qualified_names = qualified_names[:-1]
        was_instance = True

    contexts = _infer_from_stub(stub_module, qualified_names, ignore_compiled)
    if was_instance:
        contexts = ContextSet.from_sets(
            c.execute_evaluated()
            for c in contexts
            if c.is_class()
        )
    if was_bound_method:
        # Now that the instance has been properly created, we can simply get
        # the method.
        contexts = contexts.py__getattribute__(method_name)
    return contexts
def to_stub(context):
    if context.is_stub():
        return ContextSet([context])

    was_instance = context.is_instance()
    if was_instance:
        context = context.py__class__()

    qualified_names = context.get_qualified_names()
    stub_module = _load_stub_module(context.get_root_context())
    if stub_module is None or qualified_names is None:
        return NO_CONTEXTS

    was_bound_method = context.is_bound_method()
    if was_bound_method:
        # Infer the object first. We can infer the method later.
        method_name = qualified_names[-1]
        qualified_names = qualified_names[:-1]
        was_instance = True

    stub_contexts = ContextSet([stub_module])
    for name in qualified_names:
        stub_contexts = stub_contexts.py__getattribute__(name)

    if was_instance:
        stub_contexts = ContextSet.from_sets(
            c.execute_evaluated()
            for c in stub_contexts
            if c.is_class()
        )
    if was_bound_method:
        # Now that the instance has been properly created, we can simply get
        # the method.
        stub_contexts = stub_contexts.py__getattribute__(method_name)
    return stub_contexts
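# A plain-Python analogue (illustrative only) of the qualified-names walk in
# to_stub: starting at a module, one attribute is resolved per name, much
# like py__getattribute__ is applied repeatedly to the stub contexts.
import functools
import os

qualified_names = ('path', 'join')
obj = functools.reduce(getattr, qualified_names, os)
print(obj.__qualname__)  # join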
def py__getitem__(self, index_context_set, contextualized_node):
    if self._is_homogenous():
        return self._get_getitem_contexts(0).execute_annotation()

    return ContextSet.from_sets(
        _iter_over_arguments(self._index_context, self._context_of_index)
    ).execute_annotation()
def _names_to_types(self, names, attribute_lookup):
    contexts = ContextSet.from_sets(name.infer() for name in names)

    debug.dbg('finder._names_to_types: %s -> %s', names, contexts)
    if not names and self._context.is_instance() and not self._context.is_compiled():
        # handling __getattr__ / __getattribute__
        return self._check_getattr(self._context)

    # Add isinstance and other if/assert knowledge.
    if not contexts and isinstance(self._name, tree.Name) and \
            not self._name_context.is_instance() and not self._context.is_compiled():
        flow_scope = self._name
        base_nodes = [self._name_context.tree_node]

        if any(b.type in ('comp_for', 'sync_comp_for') for b in base_nodes):
            return contexts
        while True:
            flow_scope = get_parent_scope(flow_scope, include_flows=True)
            n = _check_flow_information(self._name_context, flow_scope,
                                        self._name, self._position)
            if n is not None:
                return n
            if flow_scope in base_nodes:
                break
    return contexts
def infer(self):
    with recursion.execution_allowed(self.evaluator, self) as allowed:
        # We need to catch recursions that may occur, because an
        # anonymous function can create an anonymous parameter that is
        # more or less self-referencing.
        if allowed:
            return ContextSet.from_sets(p.infer() for p in self._executed_params)
        return NO_CONTEXTS
def try_stubs_to_actual_context_set(stub_contexts, prefer_stub_to_compiled=False):
    contexts = ContextSet.from_sets(
        stub_to_actual_context_set(stub_context, ignore_compiled=prefer_stub_to_compiled)
        or ContextSet([stub_context])
        for stub_context in stub_contexts
    )
    debug.dbg('Stubs to actual: %s to %s', stub_contexts, contexts)
    return contexts
def py__getitem__(self, index_context_set, contextualized_node):
    names = self.get_function_slot_names(u'__getitem__')
    if not names:
        return super(AbstractInstanceContext, self).py__getitem__(
            index_context_set,
            contextualized_node,
        )

    args = ValuesArguments([index_context_set])
    return ContextSet.from_sets(name.infer().execute(args) for name in names)
def _execute_types_in_stmt(module_context, stmt):
    """
    Execute all types or general elements that we find in a statement. This
    doesn't include tuple, list and dict literals, because the values they
    contain are executed themselves. (Used as type information.)
    """
    definitions = module_context.eval_node(stmt)
    return ContextSet.from_sets(
        _execute_array_values(module_context.evaluator, d) for d in definitions
    )
def py__getitem__(context, typ, node):
    if not typ.get_root_context().name.string_name == "typing":
        return None
    # We assume that any class using [] in a module called "typing" with a
    # name for which we have a replacement should be replaced by that class.
    # This is not 100% airtight, but we don't have a better way to check that
    # it's actually the PEP 484 typing module and not some other module.
    if node.type == "subscriptlist":
        nodes = node.children[::2]  # skip the commas
    else:
        nodes = [node]
    del node

    nodes = [_fix_forward_reference(context, node) for node in nodes]
    type_name = typ.name.string_name

    # Union and Optional are hacked in, since it's hard to do them nicely in
    # parsed code.
    if type_name in ("Union", '_Union'):
        # In Python 3.6 it's still called typing.Union, but it's an instance
        # called _Union.
        return ContextSet.from_sets(context.eval_node(node) for node in nodes)
    if type_name in ("Optional", '_Optional'):
        # Here we have the same issue as with Union. Therefore we also need to
        # check for the instance typing._Optional (Python 3.6).
        return context.eval_node(nodes[0])

    module_node, code_lines = _get_typing_replacement_module(context.evaluator.latest_grammar)
    typing = ModuleContext(
        context.evaluator,
        module_node=module_node,
        path=None,
        code_lines=code_lines,
    )
    factories = typing.py__getattribute__("factory")
    assert len(factories) == 1
    factory = list(factories)[0]
    assert factory
    function_body_nodes = factory.tree_node.children[4].children
    valid_classnames = set(child.name.value
                           for child in function_body_nodes
                           if isinstance(child, tree.Class))
    if type_name not in valid_classnames:
        return None
    compiled_classname = compiled.create_simple_object(context.evaluator, type_name)

    from jedi.evaluate.context.iterable import FakeSequence
    args = FakeSequence(
        context.evaluator,
        u'tuple',
        [LazyTreeContext(context, n) for n in nodes]
    )

    result = factory.execute_evaluated(compiled_classname, args)
    return result
def infer(self):
    # TODO use logic from compiled.CompiledObjectFilter
    access_paths = self.parent_context.access_handle.getattr_paths(
        self.string_name, default=None
    )
    assert len(access_paths)
    contexts = [None]
    for access in access_paths:
        contexts = ContextSet.from_sets(
            _create(self._evaluator, access, parent_context=c)
            if c is None or isinstance(c, MixedObject)
            else ContextSet({create_cached_compiled_object(c.evaluator, access, c)})
            for c in contexts
        )
    return contexts
def _remap_type_vars(self, base):
    filter = self._class_context.get_type_var_filter()
    for type_var_set in base.get_generics():
        new = NO_CONTEXTS
        for type_var in type_var_set:
            if isinstance(type_var, TypeVar):
                names = filter.get(type_var.py__name__())
                new |= ContextSet.from_sets(name.infer() for name in names)
            else:
                # These will mostly be type vars, except that in some cases
                # a concrete type is already there. In that case, just add it
                # to the context set.
                new |= ContextSet([type_var])
        yield new
def _execute_array_values(evaluator, array):
    """
    Tuples indicate that there's not just one return value, but the listed
    ones. `(str, int)` means that it returns a tuple with both types.
    """
    from jedi.evaluate.context.iterable import SequenceLiteralContext, FakeSequence
    if isinstance(array, SequenceLiteralContext):
        values = []
        for lazy_context in array.py__iter__():
            objects = ContextSet.from_sets(
                _execute_array_values(evaluator, typ)
                for typ in lazy_context.infer()
            )
            values.append(LazyKnownContexts(objects))
        return {FakeSequence(evaluator, array.array_type, values)}
    else:
        return array.execute_evaluated()
def get_metaclasses(self):
    args = self._get_bases_arguments()
    if args is not None:
        m = [value for key, value in args.unpack() if key == 'metaclass']
        metaclasses = ContextSet.from_sets(lazy_context.infer() for lazy_context in m)
        metaclasses = ContextSet(m for m in metaclasses if m.is_class())
        if metaclasses:
            return metaclasses

    for lazy_base in self.py__bases__():
        for context in lazy_base.infer():
            if context.is_class():
                contexts = context.get_metaclasses()
                if contexts:
                    return contexts
    return NO_CONTEXTS
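# A plain-Python counterpart (illustrative, not jedi code) of the lookup in
# get_metaclasses: a metaclass is either passed explicitly via the
# ``metaclass=`` keyword or inherited from the first base that has one.
class Meta(type):
    pass

class Base(metaclass=Meta):
    pass

class Child(Base):
    pass

print(type(Child))  # <class '__main__.Meta'>, inherited from Base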
def completion_names(self, evaluator, only_modules=False):
    """
    :param only_modules: Indicates whether it's possible to import a
        definition that is not defined in a module.
    """
    if not self._inference_possible:
        return []

    names = []
    if self.import_path:
        # flask
        if self._str_import_path == ('flask', 'ext'):
            # List Flask extensions like ``flask_foo``
            for mod in self._get_module_names():
                modname = mod.string_name
                if modname.startswith('flask_'):
                    extname = modname[len('flask_'):]
                    names.append(ImportName(self.module_context, extname))
            # Now the old style: ``flaskext.foo``
            for dir in self._sys_path_with_modifications():
                flaskext = os.path.join(dir, 'flaskext')
                if os.path.isdir(flaskext):
                    names += self._get_module_names([flaskext])

        contexts = self.follow()
        for context in contexts:
            # Non-modules are not completable.
            if context.api_type != 'module':  # not a module
                continue
            names += context.sub_modules_dict().values()

        if not only_modules:
            from jedi.evaluate.gradual.conversion import stub_to_actual_context_set

            both_contexts = ContextSet.from_sets(
                stub_to_actual_context_set(context, ignore_compiled=True)
                for context in contexts
                if context.is_stub()
            ) | contexts
            for c in both_contexts:
                for filter in c.get_filters(search_global=False):
                    names += filter.values()
    else:
        if self.level:
            # We only get here if the level cannot be properly calculated.
            names += self._get_module_names(self._fixed_sys_path)
        else:
            # This is just the list of global imports.
            names += self._get_module_names()

    return names
def _eval_comparison(evaluator, context, left_contexts, operator, right_contexts):
    if not left_contexts or not right_contexts:
        # Illegal slices, for example, cause left/right_result to be None.
        result = (left_contexts or NO_CONTEXTS) | (right_contexts or NO_CONTEXTS)
        return _literals_to_types(evaluator, result)
    else:
        # I don't think there's a reasonable chance that a string operation
        # is still correct once we pass something like six objects.
        if len(left_contexts) * len(right_contexts) > 6:
            return _literals_to_types(evaluator, left_contexts | right_contexts)
        else:
            return ContextSet.from_sets(
                _eval_comparison_part(evaluator, context, left, operator, right)
                for left in left_contexts
                for right in right_contexts
            )
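# A quick standalone check (hypothetical operand sets) of the cutoff used in
# _eval_comparison: the pairwise branch evaluates the cross product of both
# operand sets, so 2 x 3 = 6 pairs still go through pairwise evaluation,
# while anything above 6 falls back to coarse literal-to-type conversion.
left, right = {'a', 'b'}, {'x', 'y', 'z'}
pairs = [(l, r) for l in left for r in right]
print(len(pairs))  # 6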
def _execute_array_values(evaluator, array):
    """
    Tuples indicate that there's not just one return value, but the listed
    ones. `(str, int)` means that it returns a tuple with both types.
    """
    from jedi.evaluate.context.iterable import SequenceLiteralContext, FakeSequence
    if isinstance(array, SequenceLiteralContext):
        values = []
        for lazy_context in array.py__iter__():
            objects = ContextSet.from_sets(
                _execute_array_values(evaluator, typ)
                for typ in lazy_context.infer()
            )
            values.append(LazyKnownContexts(objects))
        return set([FakeSequence(evaluator, array.array_type, values)])
    else:
        return array.execute_evaluated()
def _infer(self, only_stubs=False, prefer_stubs=False):
    assert not (only_stubs and prefer_stubs)
    if not self._name.is_context_name:
        return []

    # First we need to make sure that we have stub names (if possible) that
    # we can follow. If we don't do that, we can end up with the inferred
    # results of Python objects instead of stubs.
    names = convert_names([self._name], prefer_stubs=True)
    contexts = convert_contexts(
        ContextSet.from_sets(n.infer() for n in names),
        only_stubs=only_stubs,
        prefer_stubs=prefer_stubs,
    )
    resulting_names = [c.name for c in contexts]
    return [self if n == self._name else Definition(self._evaluator, n)
            for n in resulting_names]
def py__call__(self, arguments):
    debug.dbg("Execute overloaded function %s", self._wrapped_context, color='BLUE')
    function_executions = []
    context_set = NO_CONTEXTS
    matched = False
    for f in self._overloaded_functions:
        function_execution = f.get_function_execution(arguments)
        function_executions.append(function_execution)
        if function_execution.matches_signature():
            matched = True
            return function_execution.infer()

    if matched:
        return context_set

    if self.evaluator.is_analysis:
        # In this case we want precision.
        return NO_CONTEXTS
    return ContextSet.from_sets(fe.infer() for fe in function_executions)
def builtins_next(evaluator, iterators, defaults):
    """
    TODO this function is currently not used. It's a stab at implementing next
    in a different way than fake objects. This would be a bit more flexible.
    """
    if evaluator.python_version[0] == 2:
        name = 'next'
    else:
        name = '__next__'

    context_set = NO_CONTEXTS
    for iterator in iterators:
        if isinstance(iterator, AbstractInstanceContext):
            context_set = ContextSet.from_sets(
                n.infer()
                for filter in iterator.get_filters(include_self_names=True)
                for n in filter.get(name)
            ).execute_evaluated()
    if context_set:
        return context_set
    return defaults
def follow(self):
    if not self.import_path or not self._inference_possible:
        return NO_CONTEXTS

    import_names = tuple(
        force_unicode(i.value if isinstance(i, tree.Name) else i)
        for i in self.import_path
    )
    sys_path = self._sys_path_with_modifications()

    context_set = [None]
    for i, name in enumerate(self.import_path):
        context_set = ContextSet.from_sets([
            self._evaluator.import_module(
                import_names[:i + 1],
                parent_module_context,
                sys_path
            ) for parent_module_context in context_set
        ])
        if not context_set:
            message = 'No module named ' + '.'.join(import_names)
            _add_error(self.module_context, name, message)
            return NO_CONTEXTS
    return context_set
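# A hypothetical sketch of how the loop in follow() widens a dotted import
# path: each iteration imports one more segment, parented on the modules
# found so far. The stdlib ``email.mime.text`` package stands in for an
# arbitrary dotted path.
import importlib

import_names = ('email', 'mime', 'text')
for i in range(len(import_names)):
    module = importlib.import_module('.'.join(import_names[:i + 1]))
    print(module.__name__)  # email, email.mime, email.mime.text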
def builtins_next(evaluator, iterators, defaults):
    """
    TODO this function is currently not used. It's a stab at implementing next
    in a different way than fake objects. This would be a bit more flexible.
    """
    if evaluator.environment.version_info.major == 2:
        name = 'next'
    else:
        name = '__next__'

    context_set = NO_CONTEXTS
    for iterator in iterators:
        if isinstance(iterator, AbstractInstanceContext):
            context_set = ContextSet.from_sets(
                n.infer()
                for filter in iterator.get_filters(include_self_names=True)
                for n in filter.get(name)
            ).execute_evaluated()
    if context_set:
        return context_set
    return defaults
def get_return_values(self, check_yields=False):
    funcdef = self.tree_node
    if funcdef.type == 'lambdef':
        return self.eval_node(funcdef.children[-1])

    if check_yields:
        context_set = NO_CONTEXTS
        returns = get_yield_exprs(self.evaluator, funcdef)
    else:
        returns = funcdef.iter_return_stmts()
        from jedi.evaluate.gradual.annotation import infer_return_types
        context_set = infer_return_types(self)
        if context_set:
            # If there are annotations, prefer them over anything else.
            # This will make it faster.
            return context_set
        context_set |= docstrings.infer_return_types(self.function_context)

    for r in returns:
        check = flow_analysis.reachability_check(self, funcdef, r)
        if check is flow_analysis.UNREACHABLE:
            debug.dbg('Return unreachable: %s', r)
        else:
            if check_yields:
                context_set |= ContextSet.from_sets(
                    lazy_context.infer()
                    for lazy_context in self._get_yield_lazy_context(r)
                )
            else:
                try:
                    children = r.children
                except AttributeError:
                    ctx = compiled.builtin_from_name(self.evaluator, u'None')
                    context_set |= ContextSet([ctx])
                else:
                    context_set |= self.eval_node(children[1])
        if check is flow_analysis.REACHABLE:
            debug.dbg('Return reachable: %s', r)
            break
    return context_set
def wrapper(evaluator, import_names, parent_module_context, sys_path, prefer_stubs):
    # Note: this is the body of a caching decorator; ``func`` below refers to
    # the wrapped import function from the enclosing closure.
    try:
        python_context_set = evaluator.module_cache.get(import_names)
    except KeyError:
        if parent_module_context is not None and parent_module_context.is_stub():
            parent_module_contexts = parent_module_context.non_stub_context_set
        else:
            parent_module_contexts = [parent_module_context]
        if import_names == ('os', 'path'):
            # This is a huge exception, we follow a nested import
            # ``os.path``, because it's a very important one in Python
            # that is being achieved by messing with ``sys.modules`` in
            # ``os``.
            python_parent = next(iter(parent_module_contexts))
            if python_parent is None:
                python_parent, = evaluator.import_module(('os',), prefer_stubs=False)
            python_context_set = python_parent.py__getattribute__('path')
        else:
            python_context_set = ContextSet.from_sets(
                func(evaluator, import_names, p, sys_path)
                for p in parent_module_contexts
            )
        evaluator.module_cache.add(import_names, python_context_set)

    if not prefer_stubs:
        return python_context_set

    stub = _try_to_load_stub_cached(evaluator, import_names, python_context_set,
                                    parent_module_context, sys_path)
    if stub is not None:
        return ContextSet([stub])
    return python_context_set
def _to_stub(context):
    if context.is_stub():
        return ContextSet([context])

    was_instance = context.is_instance()
    if was_instance:
        context = context.py__class__()

    qualified_names = context.get_qualified_names()
    stub_module = _load_stub_module(context.get_root_context())
    if stub_module is None or qualified_names is None:
        return NO_CONTEXTS

    stub_contexts = ContextSet([stub_module])
    for name in qualified_names:
        stub_contexts = stub_contexts.py__getattribute__(name)

    if was_instance:
        stub_contexts = ContextSet.from_sets(
            c.execute_evaluated()
            for c in stub_contexts
            if c.is_class()
        )
    return stub_contexts
def infer_return_types(function_execution_context):
    """
    Infers the type of a function's return value, according to type annotations.
    """
    all_annotations = py__annotations__(function_execution_context.tree_node)
    annotation = all_annotations.get("return", None)
    if annotation is None:
        # If there is no Python 3-style annotation, look for a Python 2-style
        # comment annotation.
        node = function_execution_context.tree_node
        comment = parser_utils.get_following_comment_same_line(node)
        if comment is None:
            return NO_CONTEXTS

        match = re.match(r"^#\s*type:\s*\([^#]*\)\s*->\s*([^#]*)", comment)
        if not match:
            return NO_CONTEXTS

        return _evaluate_annotation_string(
            function_execution_context.function_context.get_default_param_context(),
            match.group(1).strip()
        ).execute_annotation()

    context = function_execution_context.function_context.get_default_param_context()
    unknown_type_vars = list(find_unknown_type_vars(context, annotation))
    annotation_contexts = eval_annotation(context, annotation)
    if not unknown_type_vars:
        return annotation_contexts.execute_annotation()

    type_var_dict = infer_type_vars_for_execution(function_execution_context,
                                                  all_annotations)

    return ContextSet.from_sets(
        ann.define_generics(type_var_dict)
        if isinstance(ann, (AbstractAnnotatedClass, TypeVar))
        else ContextSet({ann})
        for ann in annotation_contexts
    ).execute_annotation()
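# A standalone check of the Python 2 comment-annotation regex used in
# infer_return_types; the sample comment is made up.
import re

comment = "# type: (int, str) -> List[int]"
match = re.match(r"^#\s*type:\s*\([^#]*\)\s*->\s*([^#]*)", comment)
print(match.group(1).strip())  # List[int]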
def builtins_isinstance(objects, types, arguments, evaluator):
    bool_results = set()
    for o in objects:
        cls = o.py__class__()
        try:
            cls.py__bases__
        except AttributeError:
            # This is temporary. Everything should have a class attribute in
            # Python?! Maybe we'll leave it here, because some numpy objects or
            # whatever might not.
            bool_results = set([True, False])
            break

        mro = list(cls.py__mro__())

        for cls_or_tup in types:
            if cls_or_tup.is_class():
                bool_results.add(cls_or_tup in mro)
            elif cls_or_tup.name.string_name == 'tuple' \
                    and cls_or_tup.get_root_context() == evaluator.builtins_module:
                # Check for tuples.
                classes = ContextSet.from_sets(
                    lazy_context.infer()
                    for lazy_context in cls_or_tup.iterate()
                )
                bool_results.add(any(cls in mro for cls in classes))
            else:
                _, lazy_context = list(arguments.unpack())[1]
                if isinstance(lazy_context, LazyTreeContext):
                    node = lazy_context.data
                    message = 'TypeError: isinstance() arg 2 must be a ' \
                              'class, type, or tuple of classes and types, ' \
                              'not %s.' % cls_or_tup
                    analysis.add(lazy_context.context, 'type-error-isinstance',
                                 node, message)

    return ContextSet(
        compiled.builtin_from_name(evaluator, force_unicode(str(b)))
        for b in bool_results
    )
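# A plain-Python counterpart (illustrative) of the MRO check performed by
# builtins_isinstance: isinstance(o, cls) holds exactly when cls appears in
# type(o).__mro__, and a tuple second argument succeeds if any member matches.
class A:
    pass

class B(A):
    pass

o = B()
print(A in type(o).__mro__)     # True, same answer as isinstance(o, A)
print(isinstance(o, (A, str)))  # True: with a tuple, any match suffices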
def _names_to_types(self, names, attribute_lookup):
    contexts = ContextSet.from_sets(name.infer() for name in names)

    debug.dbg('finder._names_to_types: %s -> %s', names, contexts)
    if not names and isinstance(self._context, AbstractInstanceContext):
        # handling __getattr__ / __getattribute__
        return self._check_getattr(self._context)

    # Add isinstance and other if/assert knowledge.
    if not contexts and isinstance(self._name, tree.Name) and \
            not isinstance(self._name_context, AbstractInstanceContext):
        flow_scope = self._name
        base_node = self._name_context.tree_node
        if base_node.type == 'comp_for':
            return contexts
        while True:
            flow_scope = get_parent_scope(flow_scope, include_flows=True)
            n = _check_flow_information(self._name_context, flow_scope,
                                        self._name, self._position)
            if n is not None:
                return n
            if flow_scope == base_node:
                break
    return contexts
def py__next__(self):
    return ContextSet.from_sets(lazy_context.infer() for lazy_context in self.py__iter__())
def py__getitem__(self, index):
    return ContextSet.from_sets(lazy_context.infer() for lazy_context in self.py__iter__())
def dict_values(self):
    return ContextSet.from_sets(
        self._defining_context.eval_node(v)
        for k, v in self._items()
    )
def py__next__(self):
    # TODO add TypeError if params are given.
    return ContextSet.from_sets(lazy_context.infer() for lazy_context in self.py__iter__())
def _values(self):
    """Returns a list of lists of nodes."""
    if self.array_type == u'dict':
        return ContextSet.from_sets(v for k, v in self._items())
    else:
        return self._items()
def dict_values(self):
    return ContextSet.from_sets(values for keys, values in self._iterate())
def dict_values(self):
    return ContextSet.from_sets(lazy_context.infer() for lazy_context in self._dct.values())
def infer(self):
    return ContextSet.from_sets(p.infer() for p in self._executed_params)
def infer(self):
    return ContextSet.from_sets(l.infer() for l in self.data)
def execute(arguments):
    return ContextSet.from_sets(name.execute(arguments) for name in names)
def execute_function_slots(self, names, *evaluated_args):
    return ContextSet.from_sets(
        name.execute_evaluated(*evaluated_args)
        for name in names
    )