def infer(self):
    return ValueSet([self._lambda_value])

def _infer_expr_stmt(context, stmt, seek_name=None):
    """
    The starting point of the completion. A statement always owns a call
    list, which are the calls that a statement does. In case multiple names
    are defined in the statement, `seek_name` returns the result for this
    name.

    expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) |
                     ('=' (yield_expr|testlist_star_expr))*)
    annassign: ':' test ['=' test]
    augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
                '<<=' | '>>=' | '**=' | '//=')

    :param stmt: A `tree.ExprStmt`.
    """
    def check_setitem(stmt):
        atom_expr = stmt.children[0]
        if atom_expr.type not in ('atom_expr', 'power'):
            return False, None
        name = atom_expr.children[0]
        if name.type != 'name' or len(atom_expr.children) != 2:
            return False, None
        trailer = atom_expr.children[-1]
        return trailer.children[0] == '[', trailer.children[1]

    debug.dbg('infer_expr_stmt %s (%s)', stmt, seek_name)
    rhs = stmt.get_rhs()
    value_set = context.infer_node(rhs)

    if seek_name:
        n = TreeNameDefinition(context, seek_name)
        value_set = check_tuple_assignments(n, value_set)

    first_operator = next(stmt.yield_operators(), None)
    is_setitem, subscriptlist = check_setitem(stmt)
    is_annassign = first_operator not in ('=', None) and first_operator.type == 'operator'
    if is_annassign or is_setitem:
        # `=` is always the last character in aug assignments -> -1
        name = stmt.get_defined_names(include_setitem=True)[0].value
        left_values = context.py__getattribute__(name, position=stmt.start_pos)

        if is_setitem:
            def to_mod(v):
                c = ContextualizedSubscriptListNode(context, subscriptlist)
                if v.array_type == 'dict':
                    return DictModification(v, value_set, c)
                elif v.array_type == 'list':
                    return ListModification(v, value_set, c)
                return v

            value_set = ValueSet(to_mod(v) for v in left_values)
        else:
            operator = copy.copy(first_operator)
            operator.value = operator.value[:-1]
            for_stmt = tree.search_ancestor(stmt, 'for_stmt')
            if for_stmt is not None and for_stmt.type == 'for_stmt' and value_set \
                    and parser_utils.for_stmt_defines_one_name(for_stmt):
                # Iterate through the result and add the values; that's only
                # possible in for loops without clutter, because they are
                # predictable. Also only do it if the variable is not a tuple.
                node = for_stmt.get_testlist()
                cn = ContextualizedNode(context, node)
                ordered = list(cn.infer().iterate(cn))

                for lazy_value in ordered:
                    dct = {for_stmt.children[1].value: lazy_value.infer()}
                    with context.predefine_names(for_stmt, dct):
                        t = context.infer_node(rhs)
                        left_values = _infer_comparison(context, left_values,
                                                        operator, t)
                value_set = left_values
            else:
                value_set = _infer_comparison(context, left_values, operator, value_set)
    debug.dbg('infer_expr_stmt result %s', value_set)
    return value_set

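# Illustrative sketch (not part of Jedi): `_infer_expr_stmt` reduces an
# augmented assignment to its binary operator by copying the operator token
# and dropping the trailing `=`, so `x += y` is inferred like `x + y`.
# `AUG_OPERATORS` and `reduce_augassign` are hypothetical names for this demo.
AUG_OPERATORS = ('+=', '-=', '*=', '@=', '/=', '%=', '&=', '|=', '^=',
                 '<<=', '>>=', '**=', '//=')


def reduce_augassign(op):
    # `=` is always the last character in aug assignments -> strip it.
    assert op in AUG_OPERATORS
    return op[:-1]


assert reduce_augassign('+=') == '+'
assert reduce_augassign('//=') == '//'
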
def tree_name_to_values(inference_state, context, tree_name):
    value_set = NO_VALUES
    module_node = context.get_root_context().tree_node
    # First check for annotations, like: `foo: int = 3`
    if module_node is not None:
        names = module_node.get_used_names().get(tree_name.value, [])
        for name in names:
            expr_stmt = name.parent

            if expr_stmt.type == "expr_stmt" and expr_stmt.children[1].type == "annassign":
                correct_scope = parser_utils.get_parent_scope(name) == context.tree_node
                if correct_scope:
                    value_set |= annotation.infer_annotation(
                        context, expr_stmt.children[1].children[1]
                    ).execute_annotation()
        if value_set:
            return value_set

    types = []
    node = tree_name.get_definition(import_name_always=True, include_setitem=True)
    if node is None:
        node = tree_name.parent
        if node.type == 'global_stmt':
            c = context.create_context(tree_name)
            if c.is_module():
                # In case we are already part of the module, there is no
                # point in looking up the global statement anymore, because
                # it's not valid at that point anyway.
                return NO_VALUES
            # For global_stmt lookups, we only need the first possible scope,
            # which means the function itself.
            filter = next(c.get_filters())
            names = filter.get(tree_name.value)
            return ValueSet.from_sets(name.infer() for name in names)
        elif node.type not in ('import_from', 'import_name'):
            c = context.create_context(tree_name)
            return infer_atom(c, tree_name)

    typ = node.type
    if typ == 'for_stmt':
        types = annotation.find_type_from_comment_hint_for(context, node, tree_name)
        if types:
            return types
    if typ == 'with_stmt':
        types = annotation.find_type_from_comment_hint_with(context, node, tree_name)
        if types:
            return types

    if typ in ('for_stmt', 'comp_for', 'sync_comp_for'):
        try:
            types = context.predefined_names[node][tree_name.value]
        except KeyError:
            cn = ContextualizedNode(context, node.children[3])
            for_types = iterate_values(
                cn.infer(),
                contextualized_node=cn,
                is_async=node.parent.type == 'async_stmt',
            )
            n = TreeNameDefinition(context, tree_name)
            types = check_tuple_assignments(n, for_types)
    elif typ == 'expr_stmt':
        types = _remove_statements(context, node, tree_name)
    elif typ == 'with_stmt':
        value_managers = context.infer_node(node.get_test_node_from_name(tree_name))
        enter_methods = value_managers.py__getattribute__(u'__enter__')
        return enter_methods.execute_with_values()
    elif typ in ('import_from', 'import_name'):
        types = imports.infer_import(context, tree_name)
    elif typ in ('funcdef', 'classdef'):
        types = _apply_decorators(context, node)
    elif typ == 'try_stmt':
        # TODO: an exception can also be a tuple. Check for those.
        # TODO: check for types that are not classes and add them to the
        # static analysis report.
        exceptions = context.infer_node(tree_name.get_previous_sibling().get_previous_sibling())
        types = exceptions.execute_with_values()
    elif node.type == 'param':
        types = NO_VALUES
    else:
        raise ValueError("Should not happen. type: %s" % typ)
    return types

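# Illustrative sketch (not part of Jedi), assuming parso is available: the
# annotation fast path above walks every use of a name in the module and
# keeps `expr_stmt` parents whose second child is an `annassign`, so for
# `foo: int = 3` the annotation node `int` is found before the value.
import parso

module = parso.parse("foo: int = 3\nbar = foo\n")
annotated = [
    name for name in module.get_used_names()['foo']
    if name.parent.type == 'expr_stmt'
    and name.parent.children[1].type == 'annassign'
]
annassign = annotated[0].parent.children[1]  # the `: int = 3` part
assert annassign.children[1].value == 'int'  # the annotation node
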
def py__next__(self):
    return ValueSet.from_sets(
        lazy_value.infer() for lazy_value in self.py__iter__()
    )

def _infer_node(context, element):
    debug.dbg('infer_node %s@%s in %s', element, element.start_pos, context)
    inference_state = context.inference_state
    typ = element.type
    if typ in ('name', 'number', 'string', 'atom', 'strings', 'keyword', 'fstring'):
        return infer_atom(context, element)
    elif typ == 'lambdef':
        return ValueSet([FunctionValue.from_context(context, element)])
    elif typ == 'expr_stmt':
        return infer_expr_stmt(context, element)
    elif typ in ('power', 'atom_expr'):
        first_child = element.children[0]
        children = element.children[1:]
        had_await = False
        if first_child.type == 'keyword' and first_child.value == 'await':
            had_await = True
            first_child = children.pop(0)

        value_set = context.infer_node(first_child)
        for (i, trailer) in enumerate(children):
            if trailer == '**':  # has a power operation.
                right = context.infer_node(children[i + 1])
                value_set = _infer_comparison(
                    context, value_set, trailer, right
                )
                break
            value_set = infer_trailer(context, value_set, trailer)

        if had_await:
            return value_set.py__await__().py__stop_iteration_returns()
        return value_set
    elif typ in ('testlist_star_expr', 'testlist',):
        # The implicit tuple in statements.
        return ValueSet([iterable.SequenceLiteralValue(inference_state, context, element)])
    elif typ in ('not_test', 'factor'):
        value_set = context.infer_node(element.children[-1])
        for operator in element.children[:-1]:
            value_set = infer_factor(value_set, operator)
        return value_set
    elif typ == 'test':
        # `x if foo else y` case.
        return (context.infer_node(element.children[0])
                | context.infer_node(element.children[-1]))
    elif typ == 'operator':
        # Must be an ellipsis; other operators are not inferred.
        # In Python 2 ellipsis is coded as three single-dot tokens, not
        # as a single three-dot token.
        if element.value not in ('.', '...'):
            origin = element.parent
            raise AssertionError("unhandled operator %s in %s "
                                 % (repr(element.value), origin))
        return ValueSet([compiled.builtin_from_name(inference_state, u'Ellipsis')])
    elif typ == 'dotted_name':
        value_set = infer_atom(context, element.children[0])
        for next_name in element.children[2::2]:
            value_set = value_set.py__getattribute__(next_name, name_context=context)
        return value_set
    elif typ == 'eval_input':
        return context.infer_node(element.children[0])
    elif typ == 'annassign':
        return annotation.infer_annotation(context, element.children[1]) \
            .execute_annotation()
    elif typ == 'yield_expr':
        if len(element.children) and element.children[1].type == 'yield_arg':
            # Implies that it's a yield from.
            element = element.children[1].children[1]
            generators = context.infer_node(element) \
                .py__getattribute__('__iter__').execute_with_values()
            return generators.py__stop_iteration_returns()

        # Generator.send() is not implemented.
        return NO_VALUES
    elif typ == 'namedexpr_test':
        return context.infer_node(element.children[2])
    else:
        return infer_or_test(context, element)

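# Illustrative sketch (not part of Jedi): for a `test` node (`x if c else y`)
# `_infer_node` unions the inferred values of both branches, since static
# analysis cannot decide which branch runs. `infer_ternary` is a hypothetical
# stand-in using plain sets instead of `ValueSet`:
def infer_ternary(infer, true_node, false_node):
    return infer(true_node) | infer(false_node)


toy_inference = {'x': {'int'}, 'y': {'str'}}.__getitem__
assert infer_ternary(toy_inference, 'x', 'y') == {'int', 'str'}
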
def _dict_values(self):
    return ValueSet.from_sets(
        lazy_value.infer() for lazy_value in self._dct.values()
    )

def py__simple_getitem__(self, index):
    return ValueSet.from_sets(
        lazy_value.infer() for lazy_value in self.py__iter__()
    )

def py__get__on_class(self, calling_instance, instance, class_value):
    # This is mostly an optimization to avoid Jedi aborting inference
    # because of too many function executions of Field.__get__.
    return ValueSet({calling_instance})

def py__get__(self, instance, class_value):
    return ValueSet({
        QuerySetBoundMethodWrapper(v, self._model_cls)
        for v in self._wrapped_value.py__get__(instance, class_value)
    })

def py__get__on_class(self, calling_instance, instance, class_value):
    return calling_instance.class_value.with_generics(
        (ValueSet({class_value}),)
    ).py__call__(calling_instance._arguments)

def py__getitem__(self, index_value_set, contextualized_node):
    return ValueSet(
        GenericFieldWrapper(generic)
        for generic in self._wrapped_value.py__getitem__(
            index_value_set, contextualized_node)
    )

def infer(self):
    return ValueSet([self._instance])

def execute_function_slots(self, names, *inferred_args):
    return ValueSet.from_sets(
        name.infer().execute_with_values(*inferred_args)
        for name in names
    )

def import_module(inference_state, import_names, parent_module_value, sys_path):
    """
    This method is very similar to importlib's `_gcd_import`.
    """
    if import_names[0] in settings.auto_import_modules:
        module = _load_builtin_module(inference_state, import_names, sys_path)
        if module is None:
            return NO_VALUES
        return ValueSet([module])

    module_name = '.'.join(import_names)
    if parent_module_value is None:
        # Override the sys.path. It only works well that way.
        # Injecting the path directly into `find_module` did not work.
        file_io_or_ns, is_pkg = inference_state.compiled_subprocess.get_module_info(
            string=import_names[-1],
            full_name=module_name,
            sys_path=sys_path,
            is_global_search=True,
        )
        if is_pkg is None:
            return NO_VALUES
    else:
        paths = parent_module_value.py__path__()
        if paths is None:
            # The module might not be a package.
            return NO_VALUES

        for path in paths:
            # At the moment we are only using one path, so correctness is
            # not that important here.
            if not isinstance(path, list):
                path = [path]
            file_io_or_ns, is_pkg = inference_state.compiled_subprocess.get_module_info(
                string=import_names[-1],
                path=path,
                full_name=module_name,
                is_global_search=False,
            )
            if is_pkg is not None:
                break
        else:
            return NO_VALUES

    if isinstance(file_io_or_ns, ImplicitNSInfo):
        from jedi.inference.value.namespace import ImplicitNamespaceValue
        module = ImplicitNamespaceValue(
            inference_state,
            string_names=tuple(file_io_or_ns.name.split('.')),
            paths=file_io_or_ns.paths,
        )
    elif file_io_or_ns is None:
        module = _load_builtin_module(inference_state, import_names, sys_path)
        if module is None:
            return NO_VALUES
    else:
        module = _load_python_module(
            inference_state,
            file_io_or_ns,
            import_names=import_names,
            is_package=is_pkg,
        )

    if parent_module_value is None:
        debug.dbg('global search_module %s: %s', import_names[-1], module)
    else:
        debug.dbg('search_module %s in paths %s: %s', module_name, paths, module)
    return ValueSet([module])

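# Illustrative sketch (not part of Jedi): the lookup order in `import_module`
# reduced to plain control flow. `resolve`, `find_absolute`, and
# `find_in_package` are hypothetical stand-ins for the `get_module_info`
# subprocess calls.
def resolve(import_names, parent_paths, find_absolute, find_in_package):
    if parent_paths is None:
        # Top-level import: search the (overridden) sys.path globally.
        return find_absolute(import_names[-1])
    for path in parent_paths:  # Submodule: the first matching path wins.
        found = find_in_package(import_names[-1], path)
        if found is not None:
            return found
    return None


assert resolve(['pkg', 'mod'], ['/x', '/y'],
               find_absolute=lambda name: None,
               find_in_package=lambda name, path:
                   name if path == '/y' else None) == 'mod'
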
def _dict_keys(self):
    return ValueSet.from_sets(
        self._defining_context.infer_node(k)
        for k, v in self.get_tree_entries()
    )

def infer(self):
    return ValueSet([_create_from_name(
        self._inference_state, self.parent_context, self.string_name
    )])

def _values(self):
    return ValueSet([
        FakeTuple(self.inference_state, [LazyKnownValues(self._dict_values())])
    ])

def _iter(self, arguments):
    return ValueSet([self])

def _dict_keys(self):
    return ValueSet.from_sets(
        lazy_value.infer() for lazy_value in self.py__iter__()
    )

def _next(self, arguments):
    return ValueSet.from_sets(
        lazy_value.infer() for lazy_value in self.py__iter__()
    )

def py__iter__(self, contextualized_node=None):
    return ValueSet([self])

def _infer_comparison_part(inference_state, context, left, operator, right):
    l_is_num = is_number(left)
    r_is_num = is_number(right)
    if isinstance(operator, unicode):
        str_operator = operator
    else:
        str_operator = force_unicode(str(operator.value))

    if str_operator == '*':
        # For iterables, ignore * operations.
        if isinstance(left, iterable.Sequence) or is_string(left):
            return ValueSet([left])
        elif isinstance(right, iterable.Sequence) or is_string(right):
            return ValueSet([right])
    elif str_operator == '+':
        if l_is_num and r_is_num or is_string(left) and is_string(right):
            return left.execute_operation(right, str_operator)
        elif _is_list(left) and _is_list(right) or _is_tuple(left) and _is_tuple(right):
            return ValueSet([iterable.MergedArray(inference_state, (left, right))])
    elif str_operator == '-':
        if l_is_num and r_is_num:
            return left.execute_operation(right, str_operator)
    elif str_operator == '%':
        # With strings and numbers the left type typically remains,
        # except for `int() % float()`.
        return ValueSet([left])
    elif str_operator in COMPARISON_OPERATORS:
        if left.is_compiled() and right.is_compiled():
            # Possible, because the return is not an option. Just compare.
            result = left.execute_operation(right, str_operator)
            if result:
                return result
        else:
            if str_operator in ('is', '!=', '==', 'is not'):
                operation = COMPARISON_OPERATORS[str_operator]
                bool_ = operation(left, right)
                # We can only continue if `==` returned True or `!=` returned
                # False, because there is no guarantee that the values are
                # not equal. This helps in some cases, but does not cover
                # everything.
                if (str_operator in ('is', '==')) == bool_:
                    return ValueSet([_bool_to_value(inference_state, bool_)])

            if isinstance(left, VersionInfo):
                version_info = _get_tuple_ints(right)
                if version_info is not None:
                    bool_result = compiled.access.COMPARISON_OPERATORS[operator](
                        inference_state.environment.version_info,
                        tuple(version_info)
                    )
                    return ValueSet([_bool_to_value(inference_state, bool_result)])

        return ValueSet([
            _bool_to_value(inference_state, True),
            _bool_to_value(inference_state, False)
        ])
    elif str_operator in ('in', 'not in'):
        return NO_VALUES

    def check(obj):
        """Checks if a Jedi object is either a float or an int."""
        return isinstance(obj, TreeInstance) and \
            obj.name.string_name in ('int', 'float')

    # Static analysis: one is a number, the other one is not.
    if str_operator in ('+', '-') and l_is_num != r_is_num \
            and not (check(left) or check(right)):
        message = "TypeError: unsupported operand type(s) for +: %s and %s"
        analysis.add(context, 'type-error-operation', operator,
                     message % (left, right))

    if left.is_class() or right.is_class():
        return NO_VALUES

    method_name = operator_to_magic_method[str_operator]
    magic_methods = left.py__getattribute__(method_name)
    if magic_methods:
        result = magic_methods.execute_with_values(right)
        if result:
            return result

    if not magic_methods:
        reverse_method_name = reverse_operator_to_magic_method[str_operator]
        magic_methods = right.py__getattribute__(reverse_method_name)

        result = magic_methods.execute_with_values(left)
        if result:
            return result

    result = ValueSet([left, right])
    debug.dbg('Used operator %s resulting in %s', operator, result)
    return result

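# Illustrative sketch (not part of Jedi): the magic-method fallback at the
# end of `_infer_comparison_part` mirrors Python's own binary protocol — try
# the left operand's method first, then the right operand's reflected one.
# `binary_add` and `Metres` are hypothetical names for this demo.
def binary_add(left, right):
    result = type(left).__add__(left, right)
    if result is NotImplemented:
        result = type(right).__radd__(right, left)
    return result


class Metres(object):
    def __init__(self, n):
        self.n = n

    def __radd__(self, other):  # Only the reflected method exists.
        return Metres(other + self.n)


assert binary_add(2, Metres(3)).n == 5
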
def py__stop_iteration_returns(self):
    return ValueSet(
        [compiled.builtin_from_name(self.inference_state, u'None')])

def py__getitem__(self, index_value_set, contextualized_node):
    if self.array_type == 'dict':
        return self._dict_values()
    return iterate_values(ValueSet([self]))

def infer_atom(context, atom):
    """
    Basically to process ``atom`` nodes. The parser sometimes doesn't
    generate the node (because it has just one child). In that case an atom
    might be a name or a literal as well.
    """
    state = context.inference_state
    if atom.type == 'name':
        if atom.value in ('True', 'False', 'None'):
            # Python 2...
            return ValueSet([compiled.builtin_from_name(state, atom.value)])

        # This is the first global lookup.
        stmt = tree.search_ancestor(atom, 'expr_stmt', 'lambdef') or atom
        if stmt.type == 'lambdef':
            stmt = atom
        position = stmt.start_pos
        if _is_annotation_name(atom):
            # Since Python 3.7 (with `from __future__ import annotations`),
            # annotations are essentially strings and can reference objects
            # that are defined further down in code. Therefore just set the
            # position to None, so the finder will not try to stop at a
            # certain position in the module.
            position = None
        return context.py__getattribute__(atom, position=position)
    elif atom.type == 'keyword':
        # For False/True/None
        if atom.value in ('False', 'True', 'None'):
            return ValueSet([compiled.builtin_from_name(state, atom.value)])
        elif atom.value == 'print':
            # `print` e.g. could be inferred like this in Python 2.7.
            return NO_VALUES
        elif atom.value == 'yield':
            # Contrary to yield from, yield can just appear alone to return
            # a value when used with `.send()`.
            return NO_VALUES
        assert False, 'Cannot infer the keyword %s' % atom
    elif isinstance(atom, tree.Literal):
        string = state.compiled_subprocess.safe_literal_eval(atom.value)
        return ValueSet([compiled.create_simple_object(state, string)])
    elif atom.type == 'strings':
        # Will be multiple strings.
        value_set = infer_atom(context, atom.children[0])
        for string in atom.children[1:]:
            right = infer_atom(context, string)
            value_set = _infer_comparison(context, value_set, u'+', right)
        return value_set
    elif atom.type == 'fstring':
        return compiled.get_string_value_set(state)
    else:
        c = atom.children
        # Parentheses without commas are not tuples.
        if c[0] == '(' and not len(c) == 2 \
                and not (c[1].type == 'testlist_comp'
                         and len(c[1].children) > 1):
            return context.infer_node(c[1])

        try:
            comp_for = c[1].children[1]
        except (IndexError, AttributeError):
            pass
        else:
            if comp_for == ':':
                # Dict comprehensions have a colon at the 3rd index.
                try:
                    comp_for = c[1].children[3]
                except IndexError:
                    pass

            if comp_for.type in ('comp_for', 'sync_comp_for'):
                return ValueSet([iterable.comprehension_from_atom(
                    state, context, atom
                )])

        # It's a dict/list/tuple literal.
        array_node = c[1]
        try:
            array_node_c = array_node.children
        except AttributeError:
            array_node_c = []
        if c[0] == '{' and (array_node == '}' or ':' in array_node_c
                            or '**' in array_node_c):
            new_value = iterable.DictLiteralValue(state, context, atom)
        else:
            new_value = iterable.SequenceLiteralValue(state, context, atom)
        return ValueSet([new_value])

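# Illustrative sketch (not part of Jedi), assuming parso is available: the
# "parentheses without commas are not tuples" check above, applied to
# parso's tree shapes. `is_grouping_parens` is a hypothetical helper.
import parso


def is_grouping_parens(source):
    atom = parso.parse(source).children[0].children[0]
    c = atom.children
    return (c[0] == '(' and not len(c) == 2
            and not (c[1].type == 'testlist_comp' and len(c[1].children) > 1))


assert is_grouping_parens('(1)\n')       # plain grouping, not a tuple
assert not is_grouping_parens('(1,)\n')  # one-element tuple literal
assert not is_grouping_parens('()\n')    # empty tuple literal
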
def _dict_values(self):
    return ValueSet.from_sets(values for keys, values in self._iterate())

def _infer_comparison_part(inference_state, context, left, operator, right):
    l_is_num = is_number(left)
    r_is_num = is_number(right)
    if isinstance(operator, unicode):
        str_operator = operator
    else:
        str_operator = force_unicode(str(operator.value))

    if str_operator == '*':
        # For iterables, ignore * operations.
        if isinstance(left, iterable.Sequence) or is_string(left):
            return ValueSet([left])
        elif isinstance(right, iterable.Sequence) or is_string(right):
            return ValueSet([right])
    elif str_operator == '+':
        if l_is_num and r_is_num or is_string(left) and is_string(right):
            return ValueSet([left.execute_operation(right, str_operator)])
        elif _is_tuple(left) and _is_tuple(right) or _is_list(left) and _is_list(right):
            return ValueSet([iterable.MergedArray(inference_state, (left, right))])
    elif str_operator == '-':
        if l_is_num and r_is_num:
            return ValueSet([left.execute_operation(right, str_operator)])
    elif str_operator == '%':
        # With strings and numbers the left type typically remains,
        # except for `int() % float()`.
        return ValueSet([left])
    elif str_operator in COMPARISON_OPERATORS:
        if left.is_compiled() and right.is_compiled():
            # Possible, because the return is not an option. Just compare.
            try:
                return ValueSet([left.execute_operation(right, str_operator)])
            except TypeError:
                # Could be True or False.
                pass
        else:
            if str_operator in ('is', '!=', '==', 'is not'):
                operation = COMPARISON_OPERATORS[str_operator]
                bool_ = operation(left, right)
                return ValueSet([_bool_to_value(inference_state, bool_)])

            if isinstance(left, VersionInfo):
                version_info = _get_tuple_ints(right)
                if version_info is not None:
                    bool_result = compiled.access.COMPARISON_OPERATORS[operator](
                        inference_state.environment.version_info,
                        tuple(version_info)
                    )
                    return ValueSet([_bool_to_value(inference_state, bool_result)])

        return ValueSet([
            _bool_to_value(inference_state, True),
            _bool_to_value(inference_state, False)
        ])
    elif str_operator == 'in':
        return NO_VALUES

    def check(obj):
        """Checks if a Jedi object is either a float or an int."""
        return isinstance(obj, TreeInstance) and \
            obj.name.string_name in ('int', 'float')

    # Static analysis: one is a number, the other one is not.
    if str_operator in ('+', '-') and l_is_num != r_is_num \
            and not (check(left) or check(right)):
        message = "TypeError: unsupported operand type(s) for +: %s and %s"
        analysis.add(context, 'type-error-operation', operator,
                     message % (left, right))

    result = ValueSet([left, right])
    debug.dbg('Used operator %s resulting in %s', operator, result)
    return result

def _imitate_values(self):
    lazy_value = LazyKnownValues(self._dict_values())
    return ValueSet([FakeList(self.inference_state, [lazy_value])])

def infer_node(context, element):
    if isinstance(context, CompForContext):
        return _infer_node(context, element)

    if_stmt = element
    while if_stmt is not None:
        if_stmt = if_stmt.parent
        if if_stmt.type in ('if_stmt', 'for_stmt'):
            break
        if parser_utils.is_scope(if_stmt):
            if_stmt = None
            break
    predefined_if_name_dict = context.predefined_names.get(if_stmt)
    # TODO: there are a lot of issues with this one. We actually should do
    # this in a different way. Caching should only be active in certain
    # cases, and this all sucks.
    if predefined_if_name_dict is None and if_stmt \
            and if_stmt.type == 'if_stmt' and context.inference_state.is_analysis:
        if_stmt_test = if_stmt.children[1]
        name_dicts = [{}]
        # If we already did a check, we don't want to do it again -> if
        # `value.predefined_names` is filled, we stop.
        # We don't want to check the if stmt itself; it's just about
        # the content.
        if element.start_pos > if_stmt_test.end_pos:
            # Now we need to check if the names in the if_stmt match the
            # names in the suite.
            if_names = get_names_of_node(if_stmt_test)
            element_names = get_names_of_node(element)
            str_element_names = [e.value for e in element_names]
            if any(i.value in str_element_names for i in if_names):
                for if_name in if_names:
                    definitions = context.inference_state.infer(context, if_name)
                    # Every name that has multiple different definitions
                    # causes the complexity to rise. The complexity should
                    # never fall below 1.
                    if len(definitions) > 1:
                        if len(name_dicts) * len(definitions) > 16:
                            debug.dbg('Too many options for if branch inference %s.', if_stmt)
                            # There's only a certain number of branches
                            # Jedi can infer, otherwise it would take too
                            # long.
                            name_dicts = [{}]
                            break

                        original_name_dicts = list(name_dicts)
                        name_dicts = []
                        for definition in definitions:
                            new_name_dicts = list(original_name_dicts)
                            for i, name_dict in enumerate(new_name_dicts):
                                new_name_dicts[i] = name_dict.copy()
                                new_name_dicts[i][if_name.value] = ValueSet([definition])

                            name_dicts += new_name_dicts
                    else:
                        for name_dict in name_dicts:
                            name_dict[if_name.value] = definitions
        if len(name_dicts) > 1:
            result = NO_VALUES
            for name_dict in name_dicts:
                with context.predefine_names(if_stmt, name_dict):
                    result |= _infer_node(context, element)
            return result
        else:
            return _infer_node_if_inferred(context, element)
    else:
        if predefined_if_name_dict:
            return _infer_node(context, element)
        else:
            return _infer_node_if_inferred(context, element)

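# Illustrative sketch (not part of Jedi): the branch splitting above grows
# the list of name->values dicts multiplicatively, one dict per combination
# of possible definitions, and resets to a single empty dict once the number
# of combinations exceeds 16. `split_branches` is a hypothetical stand-in.
def split_branches(name_dicts, if_name, definitions, limit=16):
    if len(definitions) > 1:
        if len(name_dicts) * len(definitions) > limit:
            return [{}]  # Too many branches; give up on splitting.
        return [dict(d, **{if_name: {definition}})
                for definition in definitions
                for d in name_dicts]
    for d in name_dicts:
        d[if_name] = set(definitions)
    return name_dicts


assert split_branches([{}], 'x', ['int', 'str']) == \
    [{'x': {'int'}}, {'x': {'str'}}]
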
def merge_yield_values(self, is_async=False):
    return ValueSet.from_sets(
        lazy_value.infer() for lazy_value in self.get_yield_lazy_values()
    )