def py__getattribute__(self, name_or_str, name_context=None, position=None,
                       analysis_errors=True):
    """
    :param position: Position of the last statement -> tuple of line, column
    """
    if name_context is None:
        name_context = self
    names = self.goto(name_or_str, position)

    string_name = name_or_str.value if isinstance(name_or_str, Name) else name_or_str

    # This paragraph is currently needed for proper branch type inference
    # (static analysis).
    found_predefined_types = None
    if self.predefined_names and isinstance(name_or_str, Name):
        node = name_or_str
        while node is not None and not parser_utils.is_scope(node):
            node = node.parent
            if node.type in ("if_stmt", "for_stmt", "comp_for", 'sync_comp_for'):
                try:
                    name_dict = self.predefined_names[node]
                    types = name_dict[string_name]
                except KeyError:
                    continue
                else:
                    found_predefined_types = types
                    break
    if found_predefined_types is not None and names:
        from jedi.inference import flow_analysis
        check = flow_analysis.reachability_check(
            context=self,
            value_scope=self.tree_node,
            node=name_or_str,
        )
        if check is flow_analysis.UNREACHABLE:
            values = NO_VALUES
        else:
            values = found_predefined_types
    else:
        values = ValueSet.from_sets(name.infer() for name in names)

    if not names and not values and analysis_errors:
        if isinstance(name_or_str, Name):
            from jedi.inference import analysis
            message = ("NameError: name '%s' is not defined." % string_name)
            analysis.add(name_context, 'name-error', name_or_str, message)

    debug.dbg('context.names_to_types: %s -> %s', names, values)
    if values:
        return values
    return self._check_for_additional_knowledge(name_or_str, name_context, position)

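# Illustrative aside (not part of jedi): the 'name-error' diagnostic built
# above mirrors the runtime NameError that CPython raises for an unbound
# name, including its message wording. A minimal runnable sketch:
try:
    undefined_name  # deliberately unbound, hypothetical name
except NameError as exc:
    print(exc)  # name 'undefined_name' is not defined
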
def py__iter__(self, contextualized_node=None):
    if contextualized_node is not None:
        from jedi.inference import analysis
        analysis.add(
            contextualized_node.context,
            'type-error-not-iterable',
            contextualized_node.node,
            message="TypeError: '%s' object is not iterable" % self,
        )
    return iter([])

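# Illustrative aside (not part of jedi): the fallback above reports the same
# situation that raises TypeError at runtime when a non-iterable is iterated.
try:
    iter(1)
except TypeError as exc:
    print(exc)  # 'int' object is not iterable
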
def py__getitem__(self, index_value_set, contextualized_node):
    from jedi.inference import analysis
    # TODO this value is probably not right.
    analysis.add(
        contextualized_node.context,
        'type-error-not-subscriptable',
        contextualized_node.node,
        message="TypeError: '%s' object is not subscriptable" % self,
    )
    return NO_VALUES

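# Illustrative aside (not part of jedi): the runtime counterpart of the
# 'type-error-not-subscriptable' diagnostic is indexing an object that has
# no __getitem__.
try:
    (1)[0]
except TypeError as exc:
    print(exc)  # 'int' object is not subscriptable
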
def _iterate_star_args(context, array, input_node, funcdef=None):
    if not array.py__getattribute__('__iter__'):
        if funcdef is not None:
            # TODO this funcdef should not be needed.
            m = "TypeError: %s() argument after * must be a sequence, not %s" \
                % (funcdef.name.value, array)
            analysis.add(context, 'type-error-star', input_node, message=m)
    try:
        iter_ = array.py__iter__
    except AttributeError:
        pass
    else:
        yield from iter_()

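# Illustrative aside (not part of jedi): the 'type-error-star' diagnostic
# corresponds to CPython rejecting a non-iterable after `*` in a call; the
# exact runtime wording varies across versions (newer CPython says
# "iterable" where the message above says "sequence").
def _takes_args(*args):  # hypothetical helper for the demo
    return args

try:
    _takes_args(*1)
except TypeError as exc:
    print(exc)
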
def _star_star_dict(context, array, input_node, funcdef):
    from jedi.inference.value.instance import CompiledInstance
    if isinstance(array, CompiledInstance) and array.name.string_name == 'dict':
        # For now ignore this case. In the future add proper iterators and just
        # make one call without crazy isinstance checks.
        return {}
    elif isinstance(array, iterable.Sequence) and array.array_type == 'dict':
        return array.exact_key_items()
    else:
        if funcdef is not None:
            m = "TypeError: %s argument after ** must be a mapping, not %s" \
                % (funcdef.name.value, array)
            analysis.add(context, 'type-error-star-star', input_node, message=m)
        return {}

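# Illustrative aside (not part of jedi): `**` unpacking requires a mapping at
# runtime; anything else raises TypeError, which is what
# 'type-error-star-star' models.
def _takes_kwargs(**kwargs):  # hypothetical helper for the demo
    return kwargs

try:
    _takes_kwargs(**[('a', 1)])
except TypeError as exc:
    print(exc)  # ... argument after ** must be a mapping, not list
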
def unpack_tuple_to_dict(value, types, exprlist):
    """
    Unpacking tuple assignments in for statements and expr_stmts.
    """
    if exprlist.type == 'name':
        return {exprlist.value: types}
    elif exprlist.type == 'atom' and exprlist.children[0] in ('(', '['):
        return unpack_tuple_to_dict(value, types, exprlist.children[1])
    elif exprlist.type in ('testlist', 'testlist_comp', 'exprlist',
                           'testlist_star_expr'):
        dct = {}
        parts = iter(exprlist.children[::2])
        n = 0
        for lazy_value in types.iterate(exprlist):
            n += 1
            try:
                part = next(parts)
            except StopIteration:
                # TODO this value is probably not right.
                analysis.add(value, 'value-error-too-many-values', part,
                             message="ValueError: too many values to unpack (expected %s)" % n)
            else:
                dct.update(unpack_tuple_to_dict(value, lazy_value.infer(), part))
        has_parts = next(parts, None)
        if types and has_parts is not None:
            # TODO this value is probably not right.
            analysis.add(value, 'value-error-too-few-values', has_parts,
                         message="ValueError: need more than %s values to unpack" % n)
        return dct
    elif exprlist.type == 'power' or exprlist.type == 'atom_expr':
        # Something like ``arr[x], var = ...``.
        # This is something that is not yet supported, would also be difficult
        # to write into a dict.
        return {}
    elif exprlist.type == 'star_expr':
        # `a, *b, c = x` type unpackings
        # Currently we're not supporting them.
        return {}
    raise NotImplementedError

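# Illustrative aside (not part of jedi): the two diagnostics above mirror the
# ValueErrors CPython raises for mismatched tuple unpacking (modern CPython
# phrases the second one as "not enough values to unpack").
values = (1, 2, 3)
try:
    a, b = values
except ValueError as exc:
    print(exc)  # too many values to unpack (expected 2)
try:
    a, b, c, d = values
except ValueError as exc:
    print(exc)  # not enough values to unpack (expected 4, got 3)
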
def builtins_isinstance(objects, types, arguments, inference_state):
    bool_results = set()
    for o in objects:
        cls = o.py__class__()
        try:
            cls.py__bases__
        except AttributeError:
            # This is temporary. Everything should have a class attribute in
            # Python?! Maybe we'll leave it here, because some numpy objects or
            # whatever might not.
            bool_results = set([True, False])
            break

        mro = list(cls.py__mro__())

        for cls_or_tup in types:
            if cls_or_tup.is_class():
                bool_results.add(cls_or_tup in mro)
            elif cls_or_tup.name.string_name == 'tuple' \
                    and cls_or_tup.get_root_context().is_builtins_module():
                # Check for tuples.
                classes = ValueSet.from_sets(
                    lazy_value.infer()
                    for lazy_value in cls_or_tup.iterate()
                )
                bool_results.add(any(cls in mro for cls in classes))
            else:
                _, lazy_value = list(arguments.unpack())[1]
                if isinstance(lazy_value, LazyTreeValue):
                    node = lazy_value.data
                    message = 'TypeError: isinstance() arg 2 must be a ' \
                              'class, type, or tuple of classes and types, ' \
                              'not %s.' % cls_or_tup
                    analysis.add(lazy_value.context, 'type-error-isinstance',
                                 node, message)

    return ValueSet(
        compiled.builtin_from_name(inference_state, str(b))
        for b in bool_results
    )

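# Illustrative aside (not part of jedi): the plugin above models the built-in
# isinstance(), which accepts a class or a tuple of classes as its second
# argument and raises TypeError for anything else (exact wording varies by
# CPython version).
print(isinstance(3, int))         # True
print(isinstance(3, (str, int)))  # True -- the tuple branch handled above
try:
    isinstance(3, 42)
except TypeError as exc:
    print(exc)
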
def _infer_comparison_part(inference_state, context, left, operator, right):
    l_is_num = is_number(left)
    r_is_num = is_number(right)
    if isinstance(operator, str):
        str_operator = operator
    else:
        str_operator = str(operator.value)

    if str_operator == '*':
        # for iterables, ignore * operations
        if isinstance(left, iterable.Sequence) or is_string(left):
            return ValueSet([left])
        elif isinstance(right, iterable.Sequence) or is_string(right):
            return ValueSet([right])
    elif str_operator == '+':
        if l_is_num and r_is_num or is_string(left) and is_string(right):
            return left.execute_operation(right, str_operator)
        elif _is_list(left) and _is_list(right) or _is_tuple(left) and _is_tuple(right):
            return ValueSet([iterable.MergedArray(inference_state, (left, right))])
    elif str_operator == '-':
        if l_is_num and r_is_num:
            return left.execute_operation(right, str_operator)
    elif str_operator == '%':
        # With strings and numbers the left type typically remains. Except for
        # `int() % float()`.
        return ValueSet([left])
    elif str_operator in COMPARISON_OPERATORS:
        if left.is_compiled() and right.is_compiled():
            # Possible, because the return is not an option. Just compare.
            result = left.execute_operation(right, str_operator)
            if result:
                return result
        else:
            if str_operator in ('is', '!=', '==', 'is not'):
                operation = COMPARISON_OPERATORS[str_operator]
                bool_ = operation(left, right)
                # Only if == returns True or != returns False, we can continue.
                # There's no guarantee that they are not equal. This can help
                # in some cases, but does not cover everything.
                if (str_operator in ('is', '==')) == bool_:
                    return ValueSet([_bool_to_value(inference_state, bool_)])

            if isinstance(left, VersionInfo):
                version_info = _get_tuple_ints(right)
                if version_info is not None:
                    bool_result = compiled.access.COMPARISON_OPERATORS[operator](
                        inference_state.environment.version_info,
                        tuple(version_info)
                    )
                    return ValueSet([_bool_to_value(inference_state, bool_result)])

        return ValueSet([
            _bool_to_value(inference_state, True),
            _bool_to_value(inference_state, False)
        ])
    elif str_operator in ('in', 'not in'):
        return NO_VALUES

    def check(obj):
        """Checks if a Jedi object is either a float or an int."""
        return isinstance(obj, TreeInstance) and \
            obj.name.string_name in ('int', 'float')

    # Static analysis, one is a number, the other one is not.
    if str_operator in ('+', '-') and l_is_num != r_is_num \
            and not (check(left) or check(right)):
        message = "TypeError: unsupported operand type(s) for +: %s and %s"
        analysis.add(context, 'type-error-operation', operator,
                     message % (left, right))

    if left.is_class() or right.is_class():
        return NO_VALUES

    method_name = operator_to_magic_method[str_operator]
    magic_methods = left.py__getattribute__(method_name)
    if magic_methods:
        result = magic_methods.execute_with_values(right)
        if result:
            return result

    if not magic_methods:
        reverse_method_name = reverse_operator_to_magic_method[str_operator]
        magic_methods = right.py__getattribute__(reverse_method_name)

        result = magic_methods.execute_with_values(left)
        if result:
            return result

    result = ValueSet([left, right])
    debug.dbg('Used operator %s resulting in %s', operator, result)
    return result

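# Illustrative aside (not part of jedi): the magic-method lookup at the end of
# the function above models CPython's binary-operator protocol, where the
# reflected method (__radd__ etc.) is tried when the left operand does not
# handle the operation.
class _Left:  # hypothetical demo classes
    def __add__(self, other):
        return NotImplemented

class _Right:
    def __radd__(self, other):
        return 'handled by __radd__'

print(_Left() + _Right())  # handled by __radd__
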
def _add_error(value, name, message):
    if hasattr(name, 'parent') and value is not None:
        analysis.add(value, 'import-error', name, message)
    else:
        debug.warning('ImportError without origin: ' + message)

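# Illustrative aside (not part of jedi): the 'import-error' diagnostic added
# above corresponds to the ImportError/ModuleNotFoundError raised at runtime
# when a module cannot be located.
try:
    import module_that_does_not_exist  # hypothetical module name
except ImportError as exc:
    print(exc)  # No module named 'module_that_does_not_exist'
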
def _infer_comparison_part(inference_state, context, left, operator, right):
    l_is_num = is_number(left)
    r_is_num = is_number(right)
    if isinstance(operator, unicode):
        str_operator = operator
    else:
        str_operator = force_unicode(str(operator.value))

    if str_operator == '*':
        # for iterables, ignore * operations
        if isinstance(left, iterable.Sequence) or is_string(left):
            return ValueSet([left])
        elif isinstance(right, iterable.Sequence) or is_string(right):
            return ValueSet([right])
    elif str_operator == '+':
        if l_is_num and r_is_num or is_string(left) and is_string(right):
            return ValueSet([left.execute_operation(right, str_operator)])
        elif _is_tuple(left) and _is_tuple(right) or _is_list(left) and _is_list(right):
            return ValueSet([iterable.MergedArray(inference_state, (left, right))])
    elif str_operator == '-':
        if l_is_num and r_is_num:
            return ValueSet([left.execute_operation(right, str_operator)])
    elif str_operator == '%':
        # With strings and numbers the left type typically remains. Except for
        # `int() % float()`.
        return ValueSet([left])
    elif str_operator in COMPARISON_OPERATORS:
        if left.is_compiled() and right.is_compiled():
            # Possible, because the return is not an option. Just compare.
            try:
                return ValueSet([left.execute_operation(right, str_operator)])
            except TypeError:
                # Could be True or False.
                pass
        else:
            if str_operator in ('is', '!=', '==', 'is not'):
                operation = COMPARISON_OPERATORS[str_operator]
                bool_ = operation(left, right)
                return ValueSet([_bool_to_value(inference_state, bool_)])

            if isinstance(left, VersionInfo):
                version_info = _get_tuple_ints(right)
                if version_info is not None:
                    bool_result = compiled.access.COMPARISON_OPERATORS[operator](
                        inference_state.environment.version_info,
                        tuple(version_info)
                    )
                    return ValueSet([_bool_to_value(inference_state, bool_result)])

        return ValueSet([
            _bool_to_value(inference_state, True),
            _bool_to_value(inference_state, False)
        ])
    elif str_operator == 'in':
        return NO_VALUES

    def check(obj):
        """Checks if a Jedi object is either a float or an int."""
        return isinstance(obj, TreeInstance) and \
            obj.name.string_name in ('int', 'float')

    # Static analysis, one is a number, the other one is not.
    if str_operator in ('+', '-') and l_is_num != r_is_num \
            and not (check(left) or check(right)):
        message = "TypeError: unsupported operand type(s) for +: %s and %s"
        analysis.add(context, 'type-error-operation', operator,
                     message % (left, right))

    result = ValueSet([left, right])
    debug.dbg('Used operator %s resulting in %s', operator, result)
    return result

def get_executed_param_names_and_issues(function_value, arguments):
    """
    Return a tuple of:
      - a list of `ExecutedParamName`s corresponding to the arguments of the
        function execution `function_value`, containing the inferred value of
        those arguments (whether explicit or default)
      - a list of the issues encountered while building that list

    For example, given:
    ```
    def foo(a, b, c=None, d='d'):
        ...

    foo(42, c='c')
    ```

    Then for the execution of `foo`, this will return a tuple containing:
      - a list with entries for each parameter a, b, c & d; the entries for a,
        c, & d will have their values (42, 'c' and 'd' respectively) included.
      - a list with a single entry about the lack of a value for `b`
    """
    def too_many_args(argument):
        m = _error_argument_count(funcdef, len(unpacked_va))
        # Just report an error for the first param that is not needed (like
        # cPython).
        if arguments.get_calling_nodes():
            # There might not be a valid calling node so check for that first.
            issues.append(
                _add_argument_issue('type-error-too-many-arguments',
                                    argument, message=m)
            )
        else:
            issues.append(None)
            debug.warning('non-public warning: %s', m)

    issues = []  # List[Optional[analysis issue]]
    result_params = []
    param_dict = {}
    funcdef = function_value.tree_node
    # Default params are part of the value where the function was defined.
    # This means that they might have access on class variables that the
    # function itself doesn't have.
    default_param_context = function_value.get_default_param_context()

    for param in funcdef.get_params():
        param_dict[param.name.value] = param
    unpacked_va = list(arguments.unpack(funcdef))
    var_arg_iterator = PushBackIterator(iter(unpacked_va))

    non_matching_keys = defaultdict(lambda: [])
    keys_used = {}
    keys_only = False
    had_multiple_value_error = False
    for param in funcdef.get_params():
        # The value and key can both be null. There, the defaults apply.
        # args / kwargs will just be empty arrays / dicts, respectively.
        # Wrong value count is just ignored. If you try to test cases that are
        # not allowed in Python, Jedi will maybe not show any completions.
        is_default = False
        key, argument = next(var_arg_iterator, (None, None))
        while key is not None:
            keys_only = True
            try:
                key_param = param_dict[key]
            except KeyError:
                non_matching_keys[key] = argument
            else:
                if key in keys_used:
                    had_multiple_value_error = True
                    m = ("TypeError: %s() got multiple values for keyword argument '%s'."
                         % (funcdef.name, key))
                    for contextualized_node in arguments.get_calling_nodes():
                        issues.append(
                            analysis.add(contextualized_node.context,
                                         'type-error-multiple-values',
                                         contextualized_node.node, message=m)
                        )
                else:
                    keys_used[key] = ExecutedParamName(
                        function_value, arguments, key_param, argument)
            key, argument = next(var_arg_iterator, (None, None))

        try:
            result_params.append(keys_used[param.name.value])
            continue
        except KeyError:
            pass

        if param.star_count == 1:
            # *args param
            lazy_value_list = []
            if argument is not None:
                lazy_value_list.append(argument)
                for key, argument in var_arg_iterator:
                    # Iterate until a key argument is found.
                    if key:
                        var_arg_iterator.push_back((key, argument))
                        break
                    lazy_value_list.append(argument)
            seq = iterable.FakeTuple(function_value.inference_state, lazy_value_list)
            result_arg = LazyKnownValue(seq)
        elif param.star_count == 2:
            if argument is not None:
                too_many_args(argument)
            # **kwargs param
            dct = iterable.FakeDict(function_value.inference_state,
                                    dict(non_matching_keys))
            result_arg = LazyKnownValue(dct)
            non_matching_keys = {}
        else:
            # normal param
            if argument is None:
                # No value: Return an empty container
                if param.default is None:
                    result_arg = LazyUnknownValue()
                    if not keys_only:
                        for contextualized_node in arguments.get_calling_nodes():
                            m = _error_argument_count(funcdef, len(unpacked_va))
                            issues.append(
                                analysis.add(contextualized_node.context,
                                             'type-error-too-few-arguments',
                                             contextualized_node.node, message=m)
                            )
                else:
                    result_arg = LazyTreeValue(default_param_context, param.default)
                    is_default = True
            else:
                result_arg = argument

        result_params.append(ExecutedParamName(
            function_value, arguments, param, result_arg, is_default=is_default
        ))
        if not isinstance(result_arg, LazyUnknownValue):
            keys_used[param.name.value] = result_params[-1]

    if keys_only:
        # All arguments should be handed over to the next function. It's not
        # about the values inside, it's about the names. Jedi needs to know
        # that there's nothing to find for certain names.
        for k in set(param_dict) - set(keys_used):
            param = param_dict[k]

            if not (non_matching_keys or had_multiple_value_error
                    or param.star_count or param.default):
                # add a warning only if there's not another one.
                for contextualized_node in arguments.get_calling_nodes():
                    m = _error_argument_count(funcdef, len(unpacked_va))
                    issues.append(
                        analysis.add(contextualized_node.context,
                                     'type-error-too-few-arguments',
                                     contextualized_node.node, message=m)
                    )

    for key, lazy_value in non_matching_keys.items():
        m = "TypeError: %s() got an unexpected keyword argument '%s'." \
            % (funcdef.name, key)
        issues.append(
            _add_argument_issue('type-error-keyword-argument', lazy_value, message=m)
        )

    remaining_arguments = list(var_arg_iterator)
    if remaining_arguments:
        first_key, lazy_value = remaining_arguments[0]
        too_many_args(lazy_value)

    return result_params, issues

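# Illustrative aside (not part of jedi): the issues collected above correspond
# to the TypeErrors CPython raises for bad call signatures (the runtime
# wording differs slightly from the messages built above).
def _demo(a, b, c=None, d='d'):  # hypothetical function for the demo
    ...

for _bad_call in (
    lambda: _demo(1),              # too few arguments
    lambda: _demo(1, 2, 3, 4, 5),  # too many arguments
    lambda: _demo(1, 2, a=3),      # multiple values for argument 'a'
    lambda: _demo(1, 2, e=3),      # unexpected keyword argument 'e'
):
    try:
        _bad_call()
    except TypeError as exc:
        print(exc)
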
def _add_argument_issue(error_name, lazy_value, message):
    if isinstance(lazy_value, LazyTreeValue):
        node = lazy_value.data
        if node.parent.type == 'argument':
            node = node.parent
        return analysis.add(lazy_value.context, error_name, node, message)