def builtins_isinstance(objects, types, arguments, inference_state):
    """Statically evaluate ``isinstance(obj, types)``.

    Returns a ValueSet of compiled ``True``/``False`` values covering every
    outcome the call could have, given the inferred ``objects`` and ``types``.
    Emits a ``type-error-isinstance`` analysis issue when the second argument
    is neither a class nor a builtin tuple.
    """
    bool_results = set()
    for o in objects:
        cls = o.py__class__()
        try:
            # Probe for a real class object; only used for its side effect of
            # raising AttributeError on class-less objects.
            cls.py__bases__
        except AttributeError:
            # This is temporary. Everything should have a class attribute in
            # Python?! Maybe we'll leave it here, because some numpy objects or
            # whatever might not.
            bool_results = set([True, False])
            break

        mro = list(cls.py__mro__())

        for cls_or_tup in types:
            if cls_or_tup.is_class():
                # isinstance is True iff the class appears in the object's MRO.
                bool_results.add(cls_or_tup in mro)
            elif cls_or_tup.name.string_name == 'tuple' \
                    and cls_or_tup.get_root_context().is_builtins_module():
                # Check for tuples.
                classes = ValueSet.from_sets(
                    lazy_value.infer()
                    for lazy_value in cls_or_tup.iterate()
                )
                # A tuple matches if any of its members is in the MRO.
                bool_results.add(any(cls in mro for cls in classes))
            else:
                # Second argument is invalid; report it against the tree node
                # of the second call argument (index 1 of the unpacked args).
                _, lazy_value = list(arguments.unpack())[1]
                if isinstance(lazy_value, LazyTreeValue):
                    node = lazy_value.data
                    message = 'TypeError: isinstance() arg 2 must be a ' \
                              'class, type, or tuple of classes and types, ' \
                              'not %s.' % cls_or_tup
                    analysis.add(lazy_value.context, 'type-error-isinstance', node, message)

    return ValueSet(
        compiled.builtin_from_name(inference_state, force_unicode(str(b)))
        for b in bool_results
    )
def infer_return_types(function, arguments):
    """
    Infers the type of a function's return value,
    according to type annotations.

    Falls back to a ``# type: (...) -> X`` comment on the ``def`` line when no
    PEP 3107 annotation exists. When the return annotation references unbound
    TypeVars, they are resolved from the actual call ``arguments`` before
    execution.
    """
    all_annotations = py__annotations__(function.tree_node)
    annotation = all_annotations.get("return", None)
    if annotation is None:
        # If there is no Python 3-type annotation, look for an annotation
        # comment.
        node = function.tree_node
        comment = parser_utils.get_following_comment_same_line(node)
        if comment is None:
            return NO_VALUES

        # PEP 484 type comment: "# type: (args) -> return_type".
        match = re.match(r"^#\s*type:\s*\([^#]*\)\s*->\s*([^#]*)", comment)
        if not match:
            return NO_VALUES

        return _infer_annotation_string(
            function.get_default_param_context(),
            match.group(1).strip()
        ).execute_annotation()

    context = function.get_default_param_context()
    unknown_type_vars = find_unknown_type_vars(context, annotation)
    annotation_values = infer_annotation(context, annotation)
    if not unknown_type_vars:
        # No generics to resolve; execute the annotated classes directly.
        return annotation_values.execute_annotation()

    # Resolve TypeVars from the call-site arguments, then substitute them
    # into any generic annotation values before executing.
    type_var_dict = infer_type_vars_for_execution(function, arguments, all_annotations)

    return ValueSet.from_sets(
        ann.define_generics(type_var_dict)
        if isinstance(ann, (DefineGenericBaseClass, TypeVar))
        else ValueSet({ann})
        for ann in annotation_values
    ).execute_annotation()
def _stub_to_python_value_set(stub_value, ignore_compiled=False):
    """Map a value from a stub (.pyi) module to its runtime Python value(s).

    Instances and bound methods are normalized to their defining class first,
    looked up by qualified name in the non-stub module, and then re-created
    (re-instantiated / re-bound) on the way out. Decorated values keep their
    decoration wrapper.
    """
    stub_module_context = stub_value.get_root_context()
    if not stub_module_context.is_stub():
        # Already a non-stub value; nothing to convert.
        return ValueSet([stub_value])

    decorates = None
    if isinstance(stub_value, Decoratee):
        # Remember the original so the result can be re-wrapped below.
        decorates = stub_value._original_value

    was_instance = stub_value.is_instance()
    if was_instance:
        # Qualified-name lookup works on classes, not instances.
        stub_value = stub_value.py__class__()

    qualified_names = stub_value.get_qualified_names()
    if qualified_names is None:
        return NO_VALUES

    was_bound_method = stub_value.is_bound_method()
    if was_bound_method:
        # Infer the object first. We can infer the method later.
        method_name = qualified_names[-1]
        qualified_names = qualified_names[:-1]
        was_instance = True

    values = _infer_from_stub(stub_module_context, qualified_names, ignore_compiled)
    if was_instance:
        # Re-instantiate: only classes can be executed into instances.
        values = ValueSet.from_sets(
            c.execute_with_values()
            for c in values
            if c.is_class()
        )
    if was_bound_method:
        # Now that the instance has been properly created, we can simply get
        # the method.
        values = values.py__getattribute__(method_name)
    if decorates is not None:
        values = ValueSet(Decoratee(v, decorates) for v in values)
    return values
def infer(self, *, only_stubs=False, prefer_stubs=False):
    """
    Like :meth:`.Script.infer`, it can be useful to understand which type
    the current name has.

    Return the actual definitions. I strongly recommend not using it for
    your completions, because it might slow down |jedi|. If you want to
    read only a few objects (<=20), it might be useful, especially to get
    the original docstrings. The basic problem of this function is that it
    follows all results. This means with 1000 completions (e.g. numpy),
    it's just very, very slow.

    :param only_stubs: Only return stubs for this goto call.
    :param prefer_stubs: Prefer stubs to Python objects for this type
        inference call.
    :rtype: list of :class:`Name`
    """
    # The two flags are mutually exclusive by design.
    assert not (only_stubs and prefer_stubs)
    if not self._name.is_value_name:
        return []

    # First we need to make sure that we have stub names (if possible) that
    # we can follow. If we don't do that, we can end up with the inferred
    # results of Python objects instead of stubs.
    names = convert_names([self._name], prefer_stubs=True)
    values = convert_values(
        ValueSet.from_sets(n.infer() for n in names),
        only_stubs=only_stubs,
        prefer_stubs=prefer_stubs,
    )
    resulting_names = [c.name for c in values]
    # Reuse ``self`` when inference lands back on the same name, so callers
    # get identity-stable results.
    return [
        self if n == self._name else Name(self._inference_state, n)
        for n in resulting_names
    ]
def execute_function_slots(self, names, *inferred_args):
    """Execute every slot function behind *names* with the given arguments
    and return the union of all results."""
    result_sets = []
    for slot_name in names:
        result_sets.append(slot_name.infer().execute_with_values(*inferred_args))
    return ValueSet.from_sets(result_sets)
def _random_choice(sequences):
    """random.choice may return any element, so union the inferred values of
    every element of every candidate sequence."""
    element_sets = []
    for sequence in sequences:
        for lazy in sequence.py__iter__():
            element_sets.append(lazy.infer())
    return ValueSet.from_sets(element_sets)
def _next(self, arguments):
    """Return the union of all values the stored lazy iterators can yield."""
    inferred = [lazy.infer() for lazy in self._iter_list]
    return ValueSet.from_sets(inferred)
def py__simple_getitem__(self, index):
    """Any index could resolve to any element, so the result is the union of
    the inferred values of everything this object iterates over."""
    element_sets = []
    for lazy in self.py__iter__():
        element_sets.append(lazy.infer())
    return ValueSet.from_sets(element_sets)
def infer(self):
    """Merge the inference results of every contained lazy value."""
    merged = []
    for lazy in self.data:
        merged.append(lazy.infer())
    return ValueSet.from_sets(merged)
def py__getitem__(self, index_value_set, contextualized_node):
    """Subscript access on a generic container.

    Homogeneous generics all share one element type, so index 0 stands in
    for any index; otherwise the union of all generic parameters is used.
    """
    if not self._is_homogenous():
        all_generics = self._generics_manager.to_tuple()
        return ValueSet.from_sets(all_generics).execute_annotation()
    return self._generics_manager.get_index_and_execute(0)
def tree_name_to_values(inference_state, context, tree_name):
    """Infer the values a parse-tree name can hold.

    Dispatches on the type of the node that defines ``tree_name`` (for loop,
    with statement, import, assignment, decorated def/class, except clause,
    param, del, walrus, ...). PEP 526 variable annotations in the same scope
    take precedence over everything else.
    """
    value_set = NO_VALUES
    module_node = context.get_root_context().tree_node
    # First check for annotations, like: `foo: int = 3`
    if module_node is not None:
        names = module_node.get_used_names().get(tree_name.value, [])
        found_annotation = False
        for name in names:
            expr_stmt = name.parent

            if expr_stmt.type == "expr_stmt" and expr_stmt.children[1].type == "annassign":
                # Only annotations in the scope we are inferring in count.
                correct_scope = parser_utils.get_parent_scope(name) == context.tree_node
                if correct_scope:
                    found_annotation = True
                    value_set |= annotation.infer_annotation(
                        context, expr_stmt.children[1].children[1]
                    ).execute_annotation()
        if found_annotation:
            return value_set

    types = []
    node = tree_name.get_definition(import_name_always=True, include_setitem=True)
    if node is None:
        node = tree_name.parent
        if node.type == 'global_stmt':
            c = context.create_context(tree_name)
            if c.is_module():
                # In case we are already part of the module, there is no point
                # in looking up the global statement anymore, because it's not
                # valid at that point anyway.
                return NO_VALUES
            # For global_stmt lookups, we only need the first possible scope,
            # which means the function itself.
            filter = next(c.get_filters())
            names = filter.get(tree_name.value)
            return ValueSet.from_sets(name.infer() for name in names)
        elif node.type not in ('import_from', 'import_name'):
            c = context.create_context(tree_name)
            return infer_atom(c, tree_name)

    typ = node.type
    if typ == 'for_stmt':
        # A "# type: ..." comment on the for statement wins over iteration.
        types = annotation.find_type_from_comment_hint_for(context, node, tree_name)
        if types:
            return types
    if typ == 'with_stmt':
        # Same for a type comment on a with statement.
        types = annotation.find_type_from_comment_hint_with(context, node, tree_name)
        if types:
            return types

    if typ in ('for_stmt', 'comp_for', 'sync_comp_for'):
        try:
            # Predefined names are set while flow analysis narrows a loop var.
            types = context.predefined_names[node][tree_name.value]
        except KeyError:
            # children[3] is the iterable expression of the for statement.
            cn = ContextualizedNode(context, node.children[3])
            for_types = iterate_values(
                cn.infer(),
                contextualized_node=cn,
                is_async=node.parent.type == 'async_stmt',
            )
            n = TreeNameDefinition(context, tree_name)
            # Handle tuple unpacking targets like ``for a, b in ...``.
            types = check_tuple_assignments(n, for_types)
    elif typ == 'expr_stmt':
        types = infer_expr_stmt(context, node, tree_name)
    elif typ == 'with_stmt':
        value_managers = context.infer_node(node.get_test_node_from_name(tree_name))
        if node.parent.type == 'async_stmt':
            # In the case of `async with` statements, we need to
            # first get the coroutine from the `__aenter__` method,
            # then "unwrap" via the `__await__` method
            enter_methods = value_managers.py__getattribute__('__aenter__')
            coro = enter_methods.execute_with_values()
            return coro.py__await__().py__stop_iteration_returns()
        enter_methods = value_managers.py__getattribute__('__enter__')
        return enter_methods.execute_with_values()
    elif typ in ('import_from', 'import_name'):
        types = imports.infer_import(context, tree_name)
    elif typ in ('funcdef', 'classdef'):
        types = _apply_decorators(context, node)
    elif typ == 'try_stmt':
        # TODO an exception can also be a tuple. Check for those.
        # TODO check for types that are not classes and add it to
        # the static analysis report.
        # ``except X as name``: the exception class sits two siblings back.
        exceptions = context.infer_node(
            tree_name.get_previous_sibling().get_previous_sibling())
        types = exceptions.execute_with_values()
    elif typ == 'param':
        types = NO_VALUES
    elif typ == 'del_stmt':
        types = NO_VALUES
    elif typ == 'namedexpr_test':
        types = infer_node(context, node)
    else:
        raise ValueError("Should not happen. type: %s" % typ)
    return types
def _dict_values(self):
    """Union of all value sets of this dict; keys are discarded."""
    value_sets = [vals for _keys, vals in self._iterate()]
    return ValueSet.from_sets(value_sets)
def py__next__(self):
    """next() may produce any element, so union all iterated values."""
    results = []
    for lazy in self.py__iter__():
        results.append(lazy.infer())
    return ValueSet.from_sets(results)
def constraints(self):
    """Infer and merge all stored lazy constraint values."""
    inferred = [lazy.infer() for lazy in self._constraints_lazy_values]
    return ValueSet.from_sets(inferred)
def _complete_getattr(user_context, instance):
    """
    A heuristic to make completion for proxy objects work. This is not
    intended to work in all cases. It works exactly in this case:

        def __getattr__(self, name):
            ...
            return getattr(any_object, name)

    It is important that the return contains getattr directly, otherwise it
    won't work anymore. It's really just a stupid heuristic. It will not
    work if you write e.g. `return (getatr(o, name))`, because of the
    additional parentheses. It will also not work if you move the getattr
    to some other place that is not the return statement itself.

    It is intentional that it doesn't work in all cases. Generally it's
    really hard to do even this case (as you can see below). Most people
    will write it like this anyway and the other ones, well they are just
    out of luck I guess :) ~dave.
    """
    # __getattr__ takes precedence; __getattribute__ is the fallback slot.
    names = (instance.get_function_slot_names(u'__getattr__')
             or instance.get_function_slot_names(u'__getattribute__'))
    functions = ValueSet.from_sets(
        name.infer()
        for name in names
    )
    for func in functions:
        tree_node = func.tree_node
        if tree_node is None or tree_node.type != 'funcdef':
            continue

        for return_stmt in tree_node.iter_return_stmts():
            # Basically until the next comment we just try to find out if a
            # return statement looks exactly like `return getattr(x, name)`.
            if return_stmt.type != 'return_stmt':
                continue
            atom_expr = return_stmt.children[1]
            if atom_expr.type != 'atom_expr':
                continue
            atom = atom_expr.children[0]
            trailer = atom_expr.children[1]
            # Exactly ``getattr(...)``: a bare name followed by one trailer.
            if len(atom_expr.children) != 2 or atom.type != 'name' \
                    or atom.value != 'getattr':
                continue
            arglist = trailer.children[1]
            # Need at least two arguments (object ',' name).
            if arglist.type != 'arglist' or len(arglist.children) < 3:
                continue
            context = func.as_context()
            object_node = arglist.children[0]

            # Make sure it's a param: foo in __getattr__(self, foo)
            name_node = arglist.children[2]
            name_list = context.goto(name_node, name_node.start_pos)
            if not any(n.api_type == 'param' for n in name_list):
                continue

            # Now that we know that these are most probably completion
            # objects, we just infer the object and return them as
            # completions.
            objects = context.infer_node(object_node)
            return complete_trailer(user_context, objects)
    return []
def gather_annotation_classes(self):
    """Collect the classes referenced by the subscripted index annotation."""
    argument_sets = _iter_over_arguments(self._index_value, self._context_of_index)
    return ValueSet.from_sets(argument_sets)
def merge_yield_values(self, is_async=False):
    """Return the union of all values this generator can yield."""
    yielded = []
    for lazy in self.get_yield_lazy_values():
        yielded.append(lazy.infer())
    return ValueSet.from_sets(yielded)
def _dict_keys(self):
    """Infer only the key nodes of the dict literal; values are ignored."""
    key_sets = []
    for key_node, _value_node in self.get_tree_entries():
        key_sets.append(self._defining_context.infer_node(key_node))
    return ValueSet.from_sets(key_sets)
def gather_annotation_classes(self):
    """Flatten the managed generic parameters into a single value set."""
    generic_sets = self._generics_manager.to_tuple()
    return ValueSet.from_sets(generic_sets)
def _dict_values(self):
    """Union of the inferred values stored in the backing dict."""
    inferred = [lazy.infer() for lazy in self._dct.values()]
    return ValueSet.from_sets(inferred)
def py__simple_getitem__(self, index):
    """Look up *index* as a declared member name on the defining class."""
    if not isinstance(index, str):
        # Non-string subscripts cannot match a declared member.
        return NO_VALUES
    name_sets = []
    for member_filter in self._definition_class.get_filters(is_instance=True):
        for name in member_filter.get(index):
            name_sets.append(name.infer())
    return ValueSet.from_sets(name_sets)
def _dict_keys(self):
    """Union of every inferred key of this dict-like object."""
    key_sets = []
    for lazy in self.py__iter__():
        key_sets.append(lazy.infer())
    return ValueSet.from_sets(key_sets)