def import_module_by_names(inference_state, import_names, sys_path=None,
                           module_context=None, prefer_stubs=True):
    """Resolve a dotted import name part by part into a set of module values."""
    if sys_path is None:
        sys_path = inference_state.get_sys_path()

    # Normalize every part to a plain (unicode) string, whether it arrived
    # as a parse-tree Name or already as a string.
    str_import_names = tuple(
        force_unicode(part.value if isinstance(part, tree.Name) else part)
        for part in import_names
    )

    # ``None`` acts as the sentinel parent for the top-level lookup.
    value_set = [None]
    for depth, name in enumerate(import_names):
        modules = [
            import_module(
                inference_state,
                str_import_names[:depth + 1],
                parent,
                sys_path,
                prefer_stubs=prefer_stubs,
            )
            for parent in value_set
        ]
        value_set = ValueSet.from_sets(modules)
        if not value_set:
            message = 'No module named ' + '.'.join(str_import_names)
            if module_context is not None:
                _add_error(module_context, name, message)
            else:
                debug.warning(message)
            return NO_VALUES
    return value_set
def _get(self, name, allowed_getattr_callback, in_dir_callback, check_has_attribute=False):
    """
    To remove quite a few access calls we introduced the callback here.

    :param name: attribute name to look up (coerced to unicode below).
    :param allowed_getattr_callback: callable(name, unsafe=...) returning a
        ``(has_attribute, is_descriptor)`` pair for the wrapped object.
    :param in_dir_callback: callable(name) -> bool, whether *name* appears
        in ``dir()`` of the wrapped object.
    :param check_has_attribute: if True, return ``[]`` immediately when the
        attribute does not exist at all.
    :return: a list with at most one cached name.
    """
    # Always use unicode objects in Python 2 from here.
    name = force_unicode(name)

    # NOTE(review): removed a dead statement here; the original contained
    # ``if self._inference_state.allow_descriptor_getattr: pass``, which
    # had no effect.

    has_attribute, is_descriptor = allowed_getattr_callback(
        name,
        unsafe=self._inference_state.allow_descriptor_getattr)
    if check_has_attribute and not has_attribute:
        return []

    # Descriptors (and missing attributes) would need an unsafe ``getattr``
    # to resolve; without that permission return an empty cached name.
    if (is_descriptor or not has_attribute) \
            and not self._inference_state.allow_descriptor_getattr:
        return [self._get_cached_name(name, is_empty=True)]

    if self.is_instance and not in_dir_callback(name):
        return []
    return [self._get_cached_name(name)]
def check_hasattr(node, suite):
    """
    Return True iff *node* is a ``hasattr(obj, name)`` call inside *suite*
    whose arguments match the enclosing scope's ``payload`` (closure
    variable): the checked attribute name must equal ``payload[1]`` and
    ``payload[0]`` must be among the inferred objects.

    Control flow is assertion-driven on purpose: any structural mismatch
    raises AssertionError, which is translated into ``False`` below.
    Relies on closure variables ``medi_name``, ``node_context`` and
    ``payload`` from the enclosing function.
    """
    try:
        # The name being inferred must actually lie inside the guarded suite.
        assert suite.start_pos <= medi_name.start_pos < suite.end_pos
        assert node.type in ('power', 'atom_expr')
        base = node.children[0]
        # The call target must literally be the name ``hasattr``.
        assert base.type == 'name' and base.value == 'hasattr'
        trailer = node.children[1]
        assert trailer.type == 'trailer'
        arglist = trailer.children[1]
        assert arglist.type == 'arglist'
        from medi.inference.arguments import TreeArguments
        args = TreeArguments(node_context.inference_state, node_context, arglist)
        unpacked_args = list(args.unpack())
        # Arguments should be very simple
        assert len(unpacked_args) == 2

        # Check name
        key, lazy_value = unpacked_args[1]
        names = list(lazy_value.infer())
        assert len(names) == 1 and is_string(names[0])
        assert force_unicode(names[0].get_safe_value()) == payload[1].value

        # Check objects
        key, lazy_value = unpacked_args[0]
        objects = lazy_value.infer()
        return payload[0] in objects
    except AssertionError:
        return False
def _parse_function_doc(doc):
    """
    Takes a function and returns the params and return value as a
    tuple. This is nothing more than a docstring parser.

    TODO docstrings like utime(path, (atime, mtime)) and a(b [, b]) -> None
    TODO docstrings like 'tuple of integers'
    """
    doc = force_unicode(doc)

    # parse round parentheses: def func(a, (b,c))
    try:
        open_pos = doc.index('(')
        depth = 0
        for offset, char in enumerate(doc[open_pos:]):
            if char == '(':
                depth += 1
            elif char == ')':
                depth -= 1
                if depth == 0:
                    end = open_pos + offset
                    break
        param_str = doc[open_pos + 1:end]
    except (ValueError, UnboundLocalError):
        # ValueError: no '(' in the docstring at all.
        # UnboundLocalError: parentheses never balanced, so ``end`` above
        # was never assigned.
        debug.dbg('no brackets found - no param')
        end = 0
        param_str = u''
    else:
        # remove square brackets, that show an optional param ( = None)
        def change_options(match):
            options = match.group(1).split(',')
            for i, option in enumerate(options):
                if option and '=' not in option:
                    options[i] += '=None'
            return ','.join(options)

        while True:
            param_str, changes = re.subn(r' ?\[([^\[\]]+)\]',
                                         change_options, param_str)
            if not changes:
                break
    param_str = param_str.replace('-', '_')  # see: isinstance.__doc__

    # parse return value
    r = re.search(u'-[>-]* ', doc[end:end + 7])
    if r is None:
        ret = u''
    else:
        index = end + r.end()
        # get result type, which can contain newlines
        pattern = re.compile(r'(,\n|[^\n-])+')
        ret_str = pattern.match(doc, index).group(0).strip()
        # New object -> object()
        ret_str = re.sub(r'[nN]ew (.*)', r'\1()', ret_str)
        ret = docstr_defaults.get(ret_str, ret_str)

    return param_str, ret
def get_dir_infos(self):
    """
    Used to return a couple of infos that are needed when accessing the
    sub objects of an objects
    """
    # Map every dir() entry to its (has_attribute, is_descriptor) info.
    infos = {
        force_unicode(name): self.is_allowed_getattr(name)
        for name in self.dir()
    }
    return self.needs_type_completions(), infos
def get_qualified_names(self):
    """Return the dotted name of the wrapped object as a tuple of parts."""
    if self.is_module():
        return ()

    def best_name(obj):
        # Prefer __qualname__, fall back to __name__, then None.
        return getattr(obj, '__qualname__', getattr(obj, '__name__', None))

    qualified = best_name(self._obj)
    if qualified is None:
        # The instance itself has no name; try its type instead.
        qualified = best_name(type(self._obj))
        if qualified is None:
            return ()
    return tuple(force_unicode(part) for part in qualified.split('.'))
def _abs_path(module_context, path):
    """Make *path* absolute relative to the module's directory (or None)."""
    if os.path.isabs(path):
        return path

    module_path = module_context.py__file__()
    if module_path is None:
        # In this case we have no idea where we actually are in the file
        # system.
        return None

    base_dir = os.path.dirname(module_path)
    return os.path.abspath(os.path.join(base_dir, force_unicode(path)))
def clean_scope_docstring(scope_node):
    """ Returns a cleaned version of the docstring token. """
    node = scope_node.get_doc_node()
    if node is None:
        return ''
    # TODO We have to check next leaves until there are no new
    # leaves anymore that might be part of the docstring. A
    # docstring can also look like this: ``'foo' 'bar'``
    # Returns a literal cleaned version of the ``Token``.
    # Since we want the docstr output to be always unicode, just force it.
    return force_unicode(cleandoc(safe_literal_eval(node.value)))
def find_statement_documentation(tree_node):
    """Return the cleaned string literal that follows an ``expr_stmt``."""
    if tree_node.type == 'expr_stmt':
        tree_node = tree_node.parent  # simple_stmt

    sibling = tree_node.get_next_sibling()
    if sibling is None:
        return ''
    if sibling.type == 'simple_stmt':
        sibling = sibling.children[0]
    if sibling.type != 'string':
        return ''
    # Since we want the docstr output to be always unicode, just force it.
    return force_unicode(cleandoc(safe_literal_eval(sibling.value)))
def _get_forward_reference_node(context, string):
    """Parse *string* as an ``eval_input`` node and graft it into the tree."""
    grammar = context.inference_state.grammar
    try:
        new_node = grammar.parse(
            force_unicode(string),
            start_symbol='eval_input',
            error_recovery=False,
        )
    except ParserSyntaxError:
        debug.warning('Annotation not parsed: %s' % string)
        return None

    # Move the new node to the end of the module and hook it into the tree.
    module = context.tree_node.get_root_node()
    parser_utils.move(new_node, module.end_pos[0])
    new_node.parent = context.tree_node
    return new_node
def py__name__(self):
    """Return the class name of the wrapped object, or None."""
    if _is_class_instance(self._obj) and \
            not inspect.ismethoddescriptor(self._obj):
        try:
            cls = self._obj.__class__
        except AttributeError:
            # happens with numpy.core.umath._UFUNC_API (you get it
            # automatically by doing `import numpy`.
            return None
    else:
        # slots
        cls = self._obj

    try:
        return force_unicode(cls.__name__)
    except AttributeError:
        return None
def _find_string_name(self, lazy_value):
    """Infer *lazy_value* and return its safe string payload, if any."""
    if lazy_value is None:
        return None

    value_set = lazy_value.infer()
    if not value_set:
        return None
    if len(value_set) > 1:
        debug.warning('Found multiple values for a type variable: %s', value_set)

    candidate = next(iter(value_set))
    try:
        get_safe = candidate.get_safe_value
    except AttributeError:
        # Not a compiled/safe value; nothing we can extract.
        return None

    raw = get_safe(default=None)
    # On Python 2 environments, bytes still count as a usable name.
    if self.inference_state.environment.version_info.major == 2:
        if isinstance(raw, bytes):
            return force_unicode(raw)
    if isinstance(raw, (str, unicode)):
        return raw
    return None
def _force_unicode_decorator(func):
    """Wrap *func* so that its return value is passed through ``force_unicode``."""
    def wrapper(*args, **kwargs):
        return force_unicode(func(*args, **kwargs))
    return wrapper
def __call__(self, func):
    """Register *func* on this object (decorator usage) and return self."""
    if self.check_name is None:
        # Derive the check name by dropping the first two characters of the
        # function name (presumably a ``py`` prefix — confirm at call sites).
        self.check_name = force_unicode(func.__name__[2:])
    self.func = func
    return self
def _infer_comparison_part(inference_state, context, left, operator, right):
    """
    Infer the result of ``left <operator> right`` for static analysis.

    :param operator: either a unicode string or a parse-tree Operator node;
        it is normalized into ``str_operator`` below.
    :return: a ValueSet with the possible results (may be ``NO_VALUES``).
    """
    l_is_num = is_number(left)
    r_is_num = is_number(right)
    if isinstance(operator, unicode):
        str_operator = operator
    else:
        str_operator = force_unicode(str(operator.value))

    if str_operator == '*':
        # for iterables, ignore * operations
        if isinstance(left, iterable.Sequence) or is_string(left):
            return ValueSet([left])
        elif isinstance(right, iterable.Sequence) or is_string(right):
            return ValueSet([right])
    elif str_operator == '+':
        if l_is_num and r_is_num or is_string(left) and is_string(right):
            return left.execute_operation(right, str_operator)
        elif _is_list(left) and _is_list(right) or _is_tuple(left) and _is_tuple(right):
            return ValueSet([iterable.MergedArray(inference_state, (left, right))])
    elif str_operator == '-':
        if l_is_num and r_is_num:
            return left.execute_operation(right, str_operator)
    elif str_operator == '%':
        # With strings and numbers the left type typically remains. Except for
        # `int() % float()`.
        return ValueSet([left])
    elif str_operator in COMPARISON_OPERATORS:
        if left.is_compiled() and right.is_compiled():
            # Possible, because the return is not an option. Just compare.
            result = left.execute_operation(right, str_operator)
            if result:
                return result
        else:
            if str_operator in ('is', '!=', '==', 'is not'):
                operation = COMPARISON_OPERATORS[str_operator]
                bool_ = operation(left, right)
                # Only if == returns True or != returns False, we can continue.
                # There's no guarantee that they are not equal. This can help
                # in some cases, but does not cover everything.
                if (str_operator in ('is', '==')) == bool_:
                    return ValueSet([_bool_to_value(inference_state, bool_)])

            if isinstance(left, VersionInfo):
                version_info = _get_tuple_ints(right)
                if version_info is not None:
                    # FIX(review): key with ``str_operator`` — ``operator``
                    # may be a parse-tree node, not the string key this
                    # mapping expects.
                    bool_result = compiled.access.COMPARISON_OPERATORS[str_operator](
                        inference_state.environment.version_info,
                        tuple(version_info)
                    )
                    return ValueSet([_bool_to_value(inference_state, bool_result)])

        # Comparison result is unknown; it could be either.
        return ValueSet([
            _bool_to_value(inference_state, True),
            _bool_to_value(inference_state, False)
        ])
    elif str_operator in ('in', 'not in'):
        return NO_VALUES

    def check(obj):
        """Checks if a Medi object is either a float or an int."""
        return isinstance(obj, TreeInstance) and \
            obj.name.string_name in ('int', 'float')

    # Static analysis, one is a number, the other one is not.
    if str_operator in ('+', '-') and l_is_num != r_is_num \
            and not (check(left) or check(right)):
        # FIX(review): report the actual operator; the message used to
        # hard-code '+' even for '-'.
        message = "TypeError: unsupported operand type(s) for %s: %s and %s"
        analysis.add(context, 'type-error-operation', operator,
                     message % (str_operator, left, right))

    if left.is_class() or right.is_class():
        return NO_VALUES

    method_name = operator_to_magic_method[str_operator]
    magic_methods = left.py__getattribute__(method_name)
    if magic_methods:
        result = magic_methods.execute_with_values(right)
        if result:
            return result
    if not magic_methods:
        # Fall back to the reflected magic method on the right operand.
        reverse_method_name = reverse_operator_to_magic_method[str_operator]
        magic_methods = right.py__getattribute__(reverse_method_name)
        result = magic_methods.execute_with_values(left)
        if result:
            return result

    result = ValueSet([left, right])
    debug.dbg('Used operator %s resulting in %s', operator, result)
    return result
def _bool_to_value(inference_state, bool_):
    """Map a Python bool onto the corresponding builtin compiled value."""
    builtin_name = force_unicode(str(bool_))
    return compiled.builtin_from_name(inference_state, builtin_name)
def py__doc__(self):
    """Return the object's docstring; empty string when there is none."""
    # ``inspect.getdoc`` may return None; normalize that to u''.
    doc = inspect.getdoc(self._obj)
    return force_unicode(doc) or u''
def __init__(self, inference_state, import_path, module_context, level=0):
    """
    An implementation similar to ``__import__``. Use `follow`
    to actually follow the imports.

    *level* specifies whether to use absolute or relative imports. 0 (the
    default) means only perform absolute imports. Positive values for level
    indicate the number of parent directories to search relative to the
    directory of the module calling ``__import__()`` (see PEP 328 for the
    details).

    :param import_path: List of namespaces (strings or Names).
    """
    debug.speed('import %s %s' % (import_path, module_context))
    self._inference_state = inference_state
    self.level = level
    self._module_context = module_context

    self._fixed_sys_path = None
    self._infer_possible = True
    if level:
        base = module_context.get_value().py__package__()
        # We need to care for two cases, the first one is if it's a valid
        # Python import. This import has a properly defined module name
        # chain like `foo.bar.baz` and an import in baz is made for
        # `..lala.` It can then resolve to `foo.bar.lala`.
        # The else here is a heuristic for all other cases, if for example
        # in `foo` you search for `...bar`, it's obviously out of scope.
        # However since Medi tries to just do it's best, we help the user
        # here, because he might have specified something wrong in his
        # project.
        if level <= len(base):
            # Here we basically rewrite the level to 0.
            base = tuple(base)
            if level > 1:
                # Strip one package name per extra level beyond the first.
                base = base[:-level + 1]
            import_path = base + tuple(import_path)
        else:
            # The relative import points above the package root; fall back
            # to resolving against the file system.
            path = module_context.py__file__()
            project_path = self._inference_state.project._path
            import_path = list(import_path)
            if path is None:
                # If no path is defined, our best guess is that the current
                # file is edited by a user on the current working
                # directory. We need to add an initial path, because it
                # will get removed as the name of the current file.
                directory = project_path
            else:
                directory = os.path.dirname(path)

            base_import_path, base_directory = _level_to_base_import_path(
                project_path, directory, level,
            )
            if base_directory is None:
                # Everything is lost, the relative import does point
                # somewhere out of the filesystem.
                self._infer_possible = False
            else:
                # Anchor sys.path at the computed base directory instead.
                self._fixed_sys_path = [force_unicode(base_directory)]

            if base_import_path is None:
                if import_path:
                    _add_error(
                        module_context, import_path[0],
                        message='Attempted relative import beyond top-level package.'
                    )
            else:
                # Prefix the filesystem-derived package chain.
                import_path = base_import_path + import_path
    self.import_path = import_path