Ejemplo n.º 1
0
def builtins_isinstance(evaluator, objects, types, arguments):
    """
    Implementation of the builtin ``isinstance()``.

    Checks each object's MRO against the given classes (or tuples of
    classes) and returns the set of possible bool results as contexts.
    """
    bool_results = set()
    for o in objects:
        try:
            mro_func = o.py__class__().py__mro__
        except AttributeError:
            # This is temporary. Everything should have a class attribute in
            # Python?! Maybe we'll leave it here, because some numpy objects or
            # whatever might not.
            # BUG FIX: the previous early return called ``compiled.create``
            # without the required ``evaluator`` argument (compare the final
            # return below); record that both outcomes are possible instead
            # and build the result through the single path at the end.
            bool_results = set([True, False])
            break

        mro = mro_func()

        for cls_or_tup in types:
            if cls_or_tup.is_class():
                bool_results.add(cls_or_tup in mro)
            elif cls_or_tup.name.string_name == 'tuple' \
                    and cls_or_tup.get_root_context() == evaluator.BUILTINS:
                # Check for tuples.
                classes = ContextSet.from_sets(
                    lazy_context.infer()
                    for lazy_context in cls_or_tup.iterate()
                )
                bool_results.add(any(cls in mro for cls in classes))
            else:
                # Second argument is neither a class nor a tuple of classes:
                # report a type error on the offending argument node.
                _, lazy_context = list(arguments.unpack())[1]
                if isinstance(lazy_context, LazyTreeContext):
                    node = lazy_context.data
                    message = 'TypeError: isinstance() arg 2 must be a ' \
                              'class, type, or tuple of classes and types, ' \
                              'not %s.' % cls_or_tup
                    analysis.add(lazy_context._context, 'type-error-isinstance', node, message)

    return ContextSet.from_iterable(compiled.create(evaluator, x) for x in bool_results)
Ejemplo n.º 2
0
    def get_return_values(self, check_yields=False):
        """
        Collect the contexts this function execution may return.

        When ``check_yields`` is true, yield expressions are inferred instead
        of return statements. Iteration stops early once a definitely
        reachable return has been processed.
        """
        funcdef = self.tree_node
        if funcdef.type == 'lambdef':
            # A lambda's value is simply its final child expression.
            return self.eval_node(funcdef.children[-1])

        if check_yields:
            context_set = NO_CONTEXTS
            returns = get_yield_exprs(self.evaluator, funcdef)
        else:
            returns = funcdef.iter_return_stmts()
            # Seed the result with types found in docstrings and PEP 484
            # annotations before looking at the actual return statements.
            context_set = docstrings.infer_return_types(self.function_context)
            context_set |= pep0484.infer_return_types(self.function_context)

        for r in returns:
            check = flow_analysis.reachability_check(self, funcdef, r)
            if check is flow_analysis.UNREACHABLE:
                debug.dbg('Return unreachable: %s', r)
            else:
                if check_yields:
                    context_set |= ContextSet.from_sets(
                        lazy_context.infer()
                        for lazy_context in self._get_yield_lazy_context(r)
                    )
                else:
                    try:
                        children = r.children
                    except AttributeError:
                        # A bare ``return`` keyword has no children -> None.
                        ctx = compiled.builtin_from_name(self.evaluator, u'None')
                        context_set |= ContextSet(ctx)
                    else:
                        context_set |= self.eval_node(children[1])
            if check is flow_analysis.REACHABLE:
                # Anything after an unconditionally reachable return is dead
                # code, so stop looking at further statements.
                debug.dbg('Return reachable: %s', r)
                break
        return context_set
Ejemplo n.º 3
0
 def infer(self):
     """Infer all executed params' values, guarding against recursion."""
     with recursion.execution_allowed(self.evaluator, self) as allowed:
         # Anonymous functions can create an anonymous parameter that is
         # more or less self referencing; the guard catches that case.
         if not allowed:
             return NO_CONTEXTS
         return ContextSet.from_sets(
             param.infer() for param in self._executed_params)
Ejemplo n.º 4
0
def infer_import(context, tree_name, is_goto=False):
    """
    Infer the module(s)/name(s) an import statement refers to.

    ``tree_name`` is a name node inside an ``import_name`` or ``import_from``
    node. With ``is_goto`` the result is a set of names instead of contexts.
    """
    module_context = context.get_root_context()
    import_node = search_ancestor(tree_name, 'import_name', 'import_from')
    import_path = import_node.get_path_for_name(tree_name)
    from_import_name = None
    evaluator = context.evaluator
    try:
        from_names = import_node.get_from_names()
    except AttributeError:
        # Is an import_name
        pass
    else:
        if len(from_names) + 1 == len(import_path):
            # We have to fetch the from_names part first and then check
            # if from_names exists in the modules.
            from_import_name = import_path[-1]
            import_path = from_names

    importer = Importer(evaluator, tuple(import_path),
                        module_context, import_node.level)

    types = importer.follow()

    #if import_node.is_nested() and not self.nested_resolve:
    #    scopes = [NestedImportModule(module, import_node)]

    if not types:
        return NO_CONTEXTS

    if from_import_name is not None:
        # Look the imported name up as an attribute on every followed module.
        types = unite(
            t.py__getattribute__(
                from_import_name,
                name_context=context,
                is_goto=is_goto,
                analysis_errors=False
            )
            for t in types
        )
        if not is_goto:
            types = ContextSet.from_set(types)

        if not types:
            # The attribute wasn't found; it may be a nested module, so retry
            # the import with the full dotted path appended.
            path = import_path + [from_import_name]
            importer = Importer(evaluator, tuple(path),
                                module_context, import_node.level)
            types = importer.follow()
            # goto only accepts `Name`
            if is_goto:
                types = set(s.name for s in types)
    else:
        # goto only accepts `Name`
        if is_goto:
            types = set(s.name for s in types)

    debug.dbg('after import: %s', types)
    return types
Ejemplo n.º 5
0
    def _imitate_items(self):
        """
        Return a fake list imitating ``dict.items()``: a sequence of
        (key, value) fake tuples built from the iterated items.
        """
        items = ContextSet.from_iterable(
            FakeSequence(
                # BUG FIX: a comma was missing after ``u'tuple'``, which made
                # Python *call* the string with the tuple as its argument
                # (``u'tuple'(...)``) -> TypeError at runtime.
                self.evaluator, u'tuple',
                (LazyKnownContexts(keys), LazyKnownContexts(values))
            ) for keys, values in self._iterate()
        )

        return create_evaluated_sequence_set(self.evaluator, items, sequence_type=u'list')
Ejemplo n.º 6
0
def py__getitem__(context, typ, node):
    """
    Handle subscription (``Foo[...]``) of typing-module constructs.

    Returns a context set for recognized typing classes, or None when the
    class is not one we replace (letting normal evaluation proceed).
    """
    if not typ.get_root_context().name.string_name == "typing":
        return None
    # we assume that any class using [] in a module called
    # "typing" with a name for which we have a replacement
    # should be replaced by that class. This is not 100%
    # airtight but I don't have a better idea to check that it's
    # actually the PEP-0484 typing module and not some other
    if node.type == "subscriptlist":
        nodes = node.children[::2]  # skip the commas
    else:
        nodes = [node]
    del node

    # Resolve string annotations ("forward references") to real nodes first.
    nodes = [_fix_forward_reference(context, node) for node in nodes]
    type_name = typ.name.string_name

    # hacked in Union and Optional, since it's hard to do nicely in parsed code
    if type_name in ("Union", '_Union'):
        # In Python 3.6 it's still called typing.Union but it's an instance
        # called _Union.
        return ContextSet.from_sets(context.eval_node(node) for node in nodes)
    if type_name in ("Optional", '_Optional'):
        # Here we have the same issue like in Union. Therefore we also need to
        # check for the instance typing._Optional (Python 3.6).
        return context.eval_node(nodes[0])

    # For everything else, delegate to the bundled typing replacement module's
    # ``factory`` function, executed with the class name and the index nodes.
    module_node, code_lines = _get_typing_replacement_module(context.evaluator.latest_grammar)
    typing = ModuleContext(
        context.evaluator,
        module_node=module_node,
        path=None,
        code_lines=code_lines,
    )
    factories = typing.py__getattribute__("factory")
    assert len(factories) == 1
    factory = list(factories)[0]
    assert factory
    # children[4] is the function's suite; collect the class names defined
    # inside it to know which typing names have replacements.
    function_body_nodes = factory.tree_node.children[4].children
    valid_classnames = set(child.name.value
                           for child in function_body_nodes
                           if isinstance(child, tree.Class))
    if type_name not in valid_classnames:
        return None
    compiled_classname = compiled.create_simple_object(context.evaluator, type_name)

    from jedi.evaluate.context.iterable import FakeSequence
    args = FakeSequence(
        context.evaluator,
        u'tuple',
        [LazyTreeContext(context, n) for n in nodes]
    )

    result = factory.execute_evaluated(compiled_classname, args)
    return result
Ejemplo n.º 7
0
def _execute_types_in_stmt(module_context, stmt):
    """
    Executing all types or general elements that we find in a statement. This
    doesn't include tuple, list and dict literals, because the stuff they
    contain is executed. (Used as type information).
    """
    return ContextSet.from_sets(
        _execute_array_values(module_context.evaluator, definition)
        for definition in module_context.eval_node(stmt)
    )
Ejemplo n.º 8
0
 def py__call__(self, params):
     """Call this compiled object: dispatch to the tree function, an
     instance construction, or plain compiled execution."""
     tree_node = self.tree_node
     if tree_node is not None and tree_node.type == 'funcdef':
         from jedi.evaluate.context.function import FunctionContext
         function = FunctionContext(
             self.evaluator,
             parent_context=self.parent_context,
             funcdef=tree_node,
         )
         return function.py__call__(params)
     if not self.access_handle.is_class():
         return ContextSet.from_iterable(self._execute_function(params))
     from jedi.evaluate.context import CompiledInstance
     return ContextSet(
         CompiledInstance(self.evaluator, self.parent_context, self, params))
Ejemplo n.º 9
0
 def py__get__(self, obj, class_context):
     """
     Invoke the ``__get__`` descriptor slot if this class defines one;
     obj may be None.
     """
     names = self.get_function_slot_names(u'__get__')
     if not names:
         return ContextSet([self])
     # Arguments in __get__ descriptors are obj, class.
     # `method` is the new parent of the array, don't know if that's good.
     if obj is None:
         obj = compiled.builtin_from_name(self.evaluator, u'None')
     return self.execute_function_slots(names, obj, class_context)
Ejemplo n.º 10
0
    def wrapper(evaluator, import_names, parent_module_context, sys_path,
                prefer_stubs):
        """
        Cached module import: look the import up in the evaluator's module
        cache first, import (via the wrapped ``func``) on a miss, and
        optionally prefer a stub module over the actual one.
        """
        try:
            actual_context_set = evaluator.module_cache.get(import_names)
        except KeyError:
            # Import relative to the non-stub parents so the actual (runtime)
            # module is resolved, not the stub.
            if parent_module_context is not None and parent_module_context.is_stub(
            ):
                parent_module_contexts = parent_module_context.non_stub_context_set
            else:
                parent_module_contexts = [parent_module_context]
            if import_names == ('os', 'path'):
                # This is a huge exception, we follow a nested import
                # ``os.path``, because it's a very important one in Python
                # that is being achieved by messing with ``sys.modules`` in
                # ``os``.
                actual_parent = next(iter(parent_module_contexts))
                if actual_parent is None:
                    actual_parent, = evaluator.import_module(
                        ('os', ), prefer_stubs=False)
                actual_context_set = actual_parent.py__getattribute__('path')
            else:
                actual_context_set = ContextSet.from_sets(
                    func(
                        evaluator,
                        import_names,
                        p,
                        sys_path,
                    ) for p in parent_module_contexts)
            evaluator.module_cache.add(import_names, actual_context_set)

        if not prefer_stubs:
            return actual_context_set

        stub = _try_to_load_stub_cached(evaluator, import_names,
                                        actual_context_set,
                                        parent_module_context, sys_path)
        if stub is not None:
            return ContextSet([stub])
        return actual_context_set
Ejemplo n.º 11
0
def _literals_to_types(evaluator, result):
    """
    Replace literal contexts ('a', 1, 1.0, etc.) with value-free instances
    of their types (str(), int(), float(), etc.).
    """
    new_result = NO_CONTEXTS
    for context in result:
        if not is_literal(context):
            new_result |= ContextSet(context)
            continue
        # Literals are only valid as long as the operations are correct.
        # Otherwise add a value-free instance.
        builtin_cls = compiled.builtin_from_name(
            evaluator, context.name.string_name)
        new_result |= builtin_cls.execute_evaluated()
    return new_result
Ejemplo n.º 12
0
def infer_return_types(function_execution_context):
    """
    Infers the type of a function's return value,
    according to type annotations.

    Falls back to a Python 2 style ``# type: (...) -> ...`` comment when no
    Python 3 annotation is present.
    """
    all_annotations = py__annotations__(function_execution_context.tree_node)
    annotation = all_annotations.get("return", None)
    if annotation is None:
        # If there is no Python 3-type annotation, look for a Python 2-type annotation
        node = function_execution_context.tree_node
        comment = parser_utils.get_following_comment_same_line(node)
        if comment is None:
            return NO_CONTEXTS

        match = re.match(r"^#\s*type:\s*\([^#]*\)\s*->\s*([^#]*)", comment)
        if not match:
            return NO_CONTEXTS

        # BUG FIX: an unreachable ``if annotation is None: return NO_CONTEXTS``
        # that followed this return has been removed (dead code).
        return _evaluate_annotation_string(
            function_execution_context.function_context.
            get_default_param_context(),
            match.group(1).strip()).execute_annotation()

    context = function_execution_context.function_context.get_default_param_context(
    )
    unknown_type_vars = list(find_unknown_type_vars(context, annotation))
    annotation_contexts = eval_annotation(context, annotation)
    if not unknown_type_vars:
        return annotation_contexts.execute_annotation()

    # Bind the unknown TypeVars from the actual call's arguments.
    type_var_dict = infer_type_vars_for_execution(function_execution_context,
                                                  all_annotations)

    return ContextSet.from_sets(
        ann.define_generics(type_var_dict) if isinstance(
            ann, (AbstractAnnotatedClass, TypeVar)) else ContextSet({ann})
        for ann in annotation_contexts).execute_annotation()
Ejemplo n.º 13
0
    def define_generics(self, type_var_dict):
        """
        Produce a class with its TypeVars bound according to *type_var_dict*;
        returns ``self`` unchanged when there is nothing to bind.
        """
        from jedi.evaluate.gradual.typing import GenericClass

        if not type_var_dict:
            return ContextSet({self})

        def remap_type_vars():
            """
            The TypeVars in the resulting classes have sometimes different names
            and we need to check for that, e.g. a signature can be:

            def iter(iterable: Iterable[_T]) -> Iterator[_T]: ...

            However, the iterator is defined as Iterator[_T_co], which means it has
            a different type var name.
            """
            for type_var in self.list_type_vars():
                yield type_var_dict.get(type_var.py__name__(), NO_CONTEXTS)

        generic = GenericClass(self, generics=tuple(remap_type_vars()))
        return ContextSet([generic])
Ejemplo n.º 14
0
 def py__get__(self, obj):
     """Invoke the ``__get__`` descriptor slot if this class defines one."""
     names = self.get_function_slot_names(u'__get__')
     if not names:
         return ContextSet(self)
     # Arguments in __get__ descriptors are obj, class.
     # `method` is the new parent of the array, don't know if that's good.
     if isinstance(obj, AbstractInstanceContext):
         return self.execute_function_slots(names, obj, obj.class_context)
     none_obj = compiled.builtin_from_name(self.evaluator, u'None')
     return self.execute_function_slots(names, none_obj, obj)
Ejemplo n.º 15
0
def builtins_isinstance(evaluator, objects, types, arguments):
    """
    Implementation of the builtin ``isinstance()``.

    Checks each object's MRO against the given classes (or tuples of
    classes) and returns the set of possible bool results as contexts.
    """
    bool_results = set()
    for o in objects:
        cls = o.py__class__()
        try:
            mro_func = cls.py__mro__
        except AttributeError:
            # This is temporary. Everything should have a class attribute in
            # Python?! Maybe we'll leave it here, because some numpy objects or
            # whatever might not.
            # Without an MRO we cannot decide, so both outcomes are possible.
            bool_results = set([True, False])
            break

        mro = mro_func()

        for cls_or_tup in types:
            if cls_or_tup.is_class():
                bool_results.add(cls_or_tup in mro)
            elif cls_or_tup.name.string_name == 'tuple' \
                    and cls_or_tup.get_root_context() == evaluator.builtins_module:
                # Check for tuples.
                classes = ContextSet.from_sets(
                    lazy_context.infer()
                    for lazy_context in cls_or_tup.iterate())
                bool_results.add(any(cls in mro for cls in classes))
            else:
                # Second argument is neither a class nor a tuple of classes:
                # report a type error on the offending argument node.
                _, lazy_context = list(arguments.unpack())[1]
                if isinstance(lazy_context, LazyTreeContext):
                    node = lazy_context.data
                    message = 'TypeError: isinstance() arg 2 must be a ' \
                              'class, type, or tuple of classes and types, ' \
                              'not %s.' % cls_or_tup
                    analysis.add(lazy_context._context,
                                 'type-error-isinstance', node, message)

    return ContextSet.from_iterable(
        compiled.builtin_from_name(evaluator, force_unicode(str(b)))
        for b in bool_results)
Ejemplo n.º 16
0
def _create(evaluator, access_handle, parent_context, *args):
    """
    Build MixedObject contexts pairing a compiled object with its syntax-tree
    counterpart(s), found either via source lookup or via stub files.
    """
    compiled_object = create_cached_compiled_object(
        evaluator,
        access_handle,
        parent_context=parent_context and parent_context.compiled_object)

    # TODO accessing this is bad, but it probably doesn't matter that much,
    # because we're working with interpreteters only here.
    python_object = access_handle.access._obj
    result = _find_syntax_node_name(evaluator, python_object)
    if result is None:
        # TODO Care about generics from stuff like `[1]` and don't return like this.
        if type(python_object) in (dict, list, tuple):
            return ContextSet({compiled_object})

        # No source found; try stub files instead.
        tree_contexts = to_stub(compiled_object)
        if not tree_contexts:
            return ContextSet({compiled_object})
    else:
        module_node, tree_node, file_io, code_lines = result

        if parent_context is None:
            # TODO this __name__ is probably wrong.
            name = compiled_object.get_root_context().py__name__()
            string_names = tuple(name.split('.'))
            module_context = ModuleContext(
                evaluator,
                module_node,
                file_io=file_io,
                string_names=string_names,
                code_lines=code_lines,
                is_package=hasattr(compiled_object, 'py__path__'),
            )
            if name is not None:
                evaluator.module_cache.add(string_names,
                                           ContextSet([module_context]))
        else:
            # The found node must live in the same module as the parent.
            assert parent_context.tree_node.get_root_node() == module_node
            module_context = parent_context.get_root_context()

        tree_contexts = ContextSet({
            module_context.create_context(tree_node,
                                          node_is_context=True,
                                          node_is_object=True)
        })
        if tree_node.type == 'classdef':
            if not access_handle.is_class():
                # Is an instance, not a class.
                tree_contexts = tree_contexts.execute_evaluated()

    return ContextSet(
        MixedObject(compiled_object, tree_context=tree_context)
        for tree_context in tree_contexts)
Ejemplo n.º 17
0
    def py__call__(self, arguments):
        """
        Create a TypeVar from a ``TypeVar(...)`` call; the first positional
        argument must be the variable's name, otherwise nothing results.
        """
        unpacked = arguments.unpack()
        key, lazy_context = next(unpacked, (None, None))
        var_name = self._find_string_name(lazy_context)
        if key is not None or var_name is None:
            # The name must be given, otherwise it's useless.
            debug.warning('Found a variable without a name %s', arguments)
            return NO_CONTEXTS

        type_var = TypeVar.create_cached(
            self.evaluator, self.parent_context,
            self._tree_name, var_name, unpacked)
        return ContextSet([type_var])
Ejemplo n.º 18
0
    def _imitate_items(self):
        """Imitate ``dict.items()``: a fake list of (key, value) fake tuples."""
        lazy_contexts = []
        for key, value in self._iterate():
            pair = FakeSequence(
                self.evaluator,
                u'tuple',
                [LazyKnownContexts(key), LazyKnownContexts(value)],
            )
            lazy_contexts.append(LazyKnownContext(pair))

        return ContextSet([FakeSequence(self.evaluator, u'list', lazy_contexts)])
Ejemplo n.º 19
0
 def infer(self):
     """Infer the builtin function this mapped method is based on and wrap
     it in a ``_BuiltinMappedMethod``."""
     for filter in self._builtin_context.get_filters():
         # We can take the first index, because on builtin methods there's
         # always only going to be one name. The same is true for the
         # inferred values.
         for name in filter.get(self.string_name):
             builtin_func = next(iter(name.infer()))
             break
         else:
             # Name not found in this filter; try the next one.
             continue
         break
     # NOTE(review): if no filter ever yields the name, ``builtin_func`` is
     # unbound here and this raises NameError — presumably the name always
     # exists on builtin contexts; verify against callers.
     return ContextSet([
         _BuiltinMappedMethod(self.parent_context, self._callable, builtin_func)
     ])
Ejemplo n.º 20
0
def _python_to_stub_names(names, fallback_to_python=False):
    """
    Map Python names to their stub-file equivalents.

    Yields stub names where they can be found; with ``fallback_to_python``
    the original Python name is yielded when no stub equivalent exists.
    """
    for name in names:
        module = name.get_root_context()
        if module.is_stub():
            # Already a stub name, nothing to convert.
            yield name
            continue

        if name.is_import():
            for new_name in name.goto():
                # Imports don't need to be converted, because they are already
                # stubs if possible.
                if fallback_to_python or new_name.is_stub():
                    yield new_name
            continue

        name_list = name.get_qualified_names()
        stubs = NO_CONTEXTS
        if name_list is not None:
            stub_module = _load_stub_module(module)
            if stub_module is not None:
                stubs = ContextSet({stub_module})
                # Walk the qualified path down inside the stub module.
                # BUG FIX: this loop variable used to be called ``name``,
                # shadowing the outer loop variable and making the
                # ``fallback_to_python`` yield below return a path segment
                # string instead of the original name object.
                for qualified_name in name_list[:-1]:
                    stubs = stubs.py__getattribute__(qualified_name)
        if stubs and name_list:
            new_names = stubs.py__getattribute__(name_list[-1], is_goto=True)
            for new_name in new_names:
                yield new_name
            if new_names:
                continue
        elif stubs:
            for c in stubs:
                yield c.name
            continue
        if fallback_to_python:
            # This is the part where if we haven't found anything, just return
            # the stub name.
            yield name
Ejemplo n.º 21
0
def py__getitem__(context, typ, node):
    """
    Handle subscription (``Foo[...]``) of typing-module constructs.

    Returns a context set for recognized typing classes, or None when the
    class is not one we replace (letting normal evaluation proceed).
    """
    if not typ.get_root_context().name.string_name == "typing":
        return None
    # we assume that any class using [] in a module called
    # "typing" with a name for which we have a replacement
    # should be replaced by that class. This is not 100%
    # airtight but I don't have a better idea to check that it's
    # actually the PEP-0484 typing module and not some other
    if node.type == "subscriptlist":
        nodes = node.children[::2]  # skip the commas
    else:
        nodes = [node]
    del node

    # Resolve string annotations ("forward references") to real nodes first.
    nodes = [_fix_forward_reference(context, node) for node in nodes]
    type_name = typ.name.string_name

    # hacked in Union and Optional, since it's hard to do nicely in parsed code
    if type_name in ("Union", '_Union'):
        # In Python 3.6 it's still called typing.Union but it's an instance
        # called _Union.
        return ContextSet.from_sets(context.eval_node(node) for node in nodes)
    if type_name in ("Optional", '_Optional'):
        # Here we have the same issue like in Union. Therefore we also need to
        # check for the instance typing._Optional (Python 3.6).
        return context.eval_node(nodes[0])

    # For everything else, delegate to the bundled typing replacement module's
    # ``factory`` function, executed with the class name and the index nodes.
    typing = ModuleContext(context.evaluator,
                           module_node=_get_typing_replacement_module(
                               context.evaluator.latest_grammar),
                           path=None)
    factories = typing.py__getattribute__("factory")
    assert len(factories) == 1
    factory = list(factories)[0]
    assert factory
    # children[4] is the function's suite; collect the class names defined
    # inside it to know which typing names have replacements.
    function_body_nodes = factory.tree_node.children[4].children
    valid_classnames = set(child.name.value for child in function_body_nodes
                           if isinstance(child, tree.Class))
    if type_name not in valid_classnames:
        return None
    compiled_classname = compiled.create_simple_object(context.evaluator,
                                                       type_name)

    from jedi.evaluate.context.iterable import FakeSequence
    args = FakeSequence(context.evaluator, u'tuple',
                        [LazyTreeContext(context, n) for n in nodes])

    result = factory.execute_evaluated(compiled_classname, args)
    return result
Ejemplo n.º 22
0
    def execute_annotation(self):
        """
        Execute a subscripted typing construct (Union, Optional, Type,
        ClassVar or another known class) and return the resulting contexts.
        """
        string_name = self._tree_name.value

        if string_name == 'Union':
            # This is kind of a special case, because we have Unions (in Jedi
            # ContextSets).
            return self.gather_annotation_classes().execute_annotation()
        if string_name == 'Optional':
            # Optional is basically just saying it's either None or the actual
            # type.
            none = ContextSet([builtin_from_name(self.evaluator, u'None')])
            return self.gather_annotation_classes().execute_annotation() | none
        if string_name == 'Type':
            # The type is actually already given in the index_context
            return ContextSet([self._index_context])
        if string_name == 'ClassVar':
            # For now don't do anything here, ClassVars are always used.
            return self._index_context.execute_annotation()

        # Everything else dispatches to a same-named class in this module.
        cls = globals()[string_name]
        instance = cls(self.evaluator, self.parent_context, self._tree_name,
                       self._index_context, self._context_of_index)
        return ContextSet([instance])
Ejemplo n.º 23
0
    def define_generics(self, type_var_dict):
        """
        Recursively bind TypeVars in this class' generics. Returns ``self``
        unchanged when no generic actually changed, to keep cached results.
        """
        changed = False
        new_generics = []
        for generic_set in self.get_generics():
            contexts = NO_CONTEXTS
            for generic in generic_set:
                if not isinstance(generic, (AbstractAnnotatedClass, TypeVar)):
                    contexts |= ContextSet([generic])
                    continue
                result = generic.define_generics(type_var_dict)
                contexts |= result
                if result != ContextSet({generic}):
                    changed = True
            new_generics.append(contexts)

        if not changed:
            # There might not be any type vars that change. In that case just
            # return itself, because it does not make sense to potentially lose
            # cached results.
            return ContextSet([self])

        new_class = GenericClass(
            self._wrapped_context, generics=tuple(new_generics))
        return ContextSet([new_class])
Ejemplo n.º 24
0
    def py__getitem__(self, index):
        """Here the index is an int/str. Raises IndexError/KeyError."""
        if self.array_type == 'dict':
            for key, value in self._items():
                evaluated_keys = self._defining_context.eval_node(key)
                if any(isinstance(k, compiled.CompiledObject)
                       and index == k.obj for k in evaluated_keys):
                    return self._defining_context.eval_node(value)
            raise KeyError('No key found in dictionary %s.' % self)

        # Can raise an IndexError
        if isinstance(index, slice):
            return ContextSet(self)
        return self._defining_context.eval_node(self._items()[index])
Ejemplo n.º 25
0
 def infer(self):
     """Resolve this name to a mixed/compiled context, following each
     access path in turn."""
     # TODO use logic from compiled.CompiledObjectFilter
     access_paths = self.parent_context.access_handle.getattr_paths(
         self.string_name, default=None)
     assert len(access_paths)
     context = None
     for access in access_paths:
         if context is not None and not isinstance(context, MixedObject):
             context = create_cached_compiled_object(
                 context.evaluator, access, context)
         else:
             context = _create(self._evaluator, access, parent_context=context)
     return ContextSet([context])
Ejemplo n.º 26
0
def _execute_array_values(evaluator, array):
    """
    Tuples indicate that there's not just one return value, but the listed
    ones.  `(str, int)` means that it returns a tuple with both types.
    """
    from jedi.evaluate.context.iterable import SequenceLiteralContext, FakeSequence
    if not isinstance(array, SequenceLiteralContext):
        return array.execute_evaluated()
    values = []
    for lazy_context in array.py__iter__():
        objects = ContextSet.from_sets(
            _execute_array_values(evaluator, inferred)
            for inferred in lazy_context.infer())
        values.append(LazyKnownContexts(objects))
    return {FakeSequence(evaluator, array.array_type, values)}
Ejemplo n.º 27
0
    def py__getitem__(self, index):
        """Here the index is an int/str. Raises IndexError/KeyError."""
        if self.array_type != u'dict':
            # Can raise an IndexError
            if isinstance(index, slice):
                return ContextSet(self)
            return self._defining_context.eval_node(self._items()[index])

        compiled_obj_index = compiled.create_simple_object(self.evaluator, index)
        for key, value in self._items():
            for k in self._defining_context.eval_node(key):
                if not isinstance(k, compiled.CompiledObject):
                    continue
                # Compare via the compiled ``==`` so non-trivial keys work.
                if k.execute_operation(compiled_obj_index, u'==').get_safe_value():
                    return self._defining_context.eval_node(value)
        raise KeyError('No key found in dictionary %s.' % self)
Ejemplo n.º 28
0
def collections_namedtuple(evaluator, obj, arguments):
    """
    Implementation of the namedtuple function.

    This has to be done by processing the namedtuple class template and
    evaluating the result.

    .. note:: |jedi| only supports namedtuples on Python >2.6.

    """
    # Namedtuples are not supported on Python 2.6
    # NOTE(review): relies on the private CPython attribute
    # ``collections._class_template``, which was removed in Python 3.7.
    if not hasattr(collections, '_class_template'):
        return NO_CONTEXTS

    # Process arguments
    # TODO here we only use one of the types, we should use all. id:464 gh:465
    name = list(_follow_param(evaluator, arguments, 0))[0].obj
    _fields = list(_follow_param(evaluator, arguments, 1))[0]
    if isinstance(_fields, compiled.CompiledObject):
        # Field names given as a single string, e.g. "x y" or "x,y".
        fields = _fields.obj.replace(',', ' ').split()
    elif isinstance(_fields, iterable.AbstractIterable):
        # Field names given as a sequence of strings.
        fields = [
            v.obj for lazy_context in _fields.py__iter__()
            for v in lazy_context.infer() if hasattr(v, 'obj')
        ]
    else:
        return NO_CONTEXTS

    base = collections._class_template
    base += _NAMEDTUPLE_INIT
    # Build source
    source = base.format(
        typename=name,
        field_names=tuple(fields),
        num_fields=len(fields),
        arg_list=repr(tuple(fields)).replace("'", "")[1:-1],
        repr_fmt=', '.join(
            collections._repr_template.format(name=name) for name in fields),
        field_defs='\n'.join(
            collections._field_template.format(index=index, name=name)
            for index, name in enumerate(fields)))

    # Parse source
    module = evaluator.grammar.parse(source)
    generated_class = next(module.iter_classdefs())
    parent_context = ModuleContext(evaluator, module, '')
    return ContextSet(ClassContext(evaluator, parent_context, generated_class))
Ejemplo n.º 29
0
    def completion_names(self, evaluator, only_modules=False):
        """
        :param only_modules: Indicates wheter it's possible to import a
            definition that is not defined in a module.
        """
        if not self._inference_possible:
            return []

        names = []
        if self.import_path:
            # flask
            if self._str_import_path == ('flask', 'ext'):
                # List Flask extensions like ``flask_foo``
                for mod in self._get_module_names():
                    modname = mod.string_name
                    if modname.startswith('flask_'):
                        extname = modname[len('flask_'):]
                        names.append(ImportName(self.module_context, extname))
                # Now the old style: ``flaskext.foo``
                for dir in self._sys_path_with_modifications():
                    flaskext = os.path.join(dir, 'flaskext')
                    if os.path.isdir(flaskext):
                        names += self._get_module_names([flaskext])

            contexts = self.follow()
            for context in contexts:
                # Non-modules are not completable.
                if context.api_type != 'module':  # not a module
                    continue
                # Submodules of each followed module are completable.
                names += context.sub_modules_dict().values()

            if not only_modules:
                # Also complete the module-level definitions, including the
                # non-stub counterparts of any stub modules.
                from jedi.evaluate.gradual.conversion import stub_to_actual_context_set
                both_contexts = ContextSet.from_sets(
                    stub_to_actual_context_set(context, ignore_compiled=True)
                    for context in contexts if context.is_stub()) | contexts
                for c in both_contexts:
                    for filter in c.get_filters(search_global=False):
                        names += filter.values()
        else:
            if self.level:
                # We only get here if the level cannot be properly calculated.
                names += self._get_module_names(self._fixed_sys_path)
            else:
                # This is just the list of global imports.
                names += self._get_module_names()
        return names
Ejemplo n.º 30
0
 def py__call__(self, item_context_set):
     """Apply ``get_item`` for every stored argument tuple against the
     given item context set."""
     context_set = NO_CONTEXTS
     for args_context in self._args_context_set:
         lazy_contexts = list(args_context.py__iter__())
         if len(lazy_contexts) == 1:
             # TODO we need to add the contextualized context.
             context_set |= item_context_set.get_item(
                 lazy_contexts[0].infer(), None)
             continue
         items = [
             LazyKnownContexts(
                 item_context_set.get_item(lazy_context.infer(), None))
             for lazy_context in lazy_contexts
         ]
         fake_list = iterable.FakeSequence(
             self._wrapped_context.evaluator, 'list', items)
         context_set |= ContextSet([fake_list])
     return context_set
Ejemplo n.º 31
0
def _execute_array_values(evaluator, array):
    """
    Tuples indicate that there's not just one return value, but the listed
    ones.  `(str, int)` means that it returns a tuple with both types.
    """
    from jedi.evaluate.context.iterable import SequenceLiteralContext, FakeSequence
    if not isinstance(array, SequenceLiteralContext):
        # Anything that is not a literal sequence is simply executed.
        return array.execute_evaluated()
    # Recursively expand each element and wrap it as a lazy known context.
    lazy_values = [
        LazyKnownContexts(ContextSet.from_sets(
            _execute_array_values(evaluator, typ)
            for typ in lazy_context.infer()
        ))
        for lazy_context in array.py__iter__()
    ]
    return {FakeSequence(evaluator, array.array_type, lazy_values)}
Ejemplo n.º 32
0
def _eval_comparison(evaluator, context, left_contexts, operator, right_contexts):
    """
    Evaluate ``left <operator> right`` for every pairing of the two context
    sets and return the union of the results.
    """
    if not left_contexts or not right_contexts:
        # illegal slices e.g. cause left/right_result to be None
        merged = (left_contexts or NO_CONTEXTS) | (right_contexts or NO_CONTEXTS)
        return _literals_to_types(evaluator, merged)
    # I don't think there's a reasonable chance that a string
    # operation is still correct, once we pass something like six
    # objects.
    if len(left_contexts) * len(right_contexts) > 6:
        return _literals_to_types(evaluator, left_contexts | right_contexts)
    return ContextSet.from_sets(
        _eval_comparison_part(evaluator, context, lhs, operator, rhs)
        for lhs in left_contexts
        for rhs in right_contexts
    )
Ejemplo n.º 33
0
def builtins_reversed(sequences, obj, arguments):
    """
    Static-analysis implementation of the builtin ``reversed``.

    ``sequences`` are the inferred argument contexts, ``obj`` is the
    ``reversed`` callable itself and ``arguments`` the raw call arguments.
    Returns a ContextSet with a single ReversedObject.
    """
    # While we could do without this variable (just by using sequences), we
    # want static analysis to work well. Therefore we need to generate the
    # values again.
    key, lazy_context = next(arguments.unpack())
    cn = None
    if isinstance(lazy_context, LazyTreeContext):
        # TODO access private
        cn = ContextualizedNode(lazy_context.context, lazy_context.data)
    ordered = list(sequences.iterate(cn))

    # Repack iterator values and then run it the normal way. This is
    # necessary, because `reversed` is a function and autocompletion
    # would fail in certain cases like `reversed(x).__iter__` if we
    # just returned the result directly.
    seq, = obj.evaluator.typing_module.py__getattribute__('Iterator').execute_evaluated()
    return ContextSet([ReversedObject(seq, list(reversed(ordered)))])
Ejemplo n.º 34
0
def _eval_comparison(evaluator, context, left_contexts, operator, right_contexts):
    """
    Evaluate a binary operation between two context sets and return the
    union of all possible result contexts.
    """
    if not left_contexts or not right_contexts:
        # illegal slices e.g. cause left/right_result to be None
        result = (left_contexts or NO_CONTEXTS) | (right_contexts or NO_CONTEXTS)
        return _literals_to_types(evaluator, result)
    else:
        # I don't think there's a reasonable chance that a string
        # operation is still correct, once we pass something like six
        # objects.
        if len(left_contexts) * len(right_contexts) > 6:
            # Too many pairings: degrade to the literals' types instead of
            # evaluating every combination.
            return _literals_to_types(evaluator, left_contexts | right_contexts)
        else:
            return ContextSet.from_sets(
                _eval_comparison_part(evaluator, context, left, operator, right)
                for left in left_contexts
                for right in right_contexts
            )
Ejemplo n.º 35
0
    def _infer(self, only_stubs=False, prefer_stubs=False):
        """
        Resolve this definition's name to the Definition objects behind it,
        optionally restricted to (or preferring) stub contexts.
        """
        assert not (only_stubs and prefer_stubs)

        if not self._name.is_context_name:
            # e.g. keyword names cannot be followed to a context.
            return []

        # First we need to make sure that we have stub names (if possible) that
        # we can follow. If we don't do that, we can end up with the inferred
        # results of Python objects instead of stubs.
        names = convert_names([self._name], prefer_stubs=True)
        contexts = convert_contexts(
            ContextSet.from_sets(n.infer() for n in names),
            only_stubs=only_stubs,
            prefer_stubs=prefer_stubs,
        )
        resulting_names = [c.name for c in contexts]
        # Reuse `self` when the resolved name is unchanged, to keep identity.
        return [self if n == self._name else Definition(self._evaluator, n)
                for n in resulting_names]
Ejemplo n.º 36
0
def jedi_importer_test1a(importer, import_parts, import_path, sys_path):
    """
    Importer plugin hook: resolve the implicit namespace package 'mylib'.

    Returns a ContextSet containing the loaded module, or NO_CONTEXTS when
    this hook does not apply or loading failed.
    """
    module_name = '.'.join(import_parts)
    if module_name == 'mylib':
        # NOTE(review): `virtual_mod_path` must come from the enclosing
        # scope where this hook is defined — confirm it is in scope.
        implicit_ns_info = ImplicitNSInfo('mylib', [virtual_mod_path])
        module = imports._load_module(
            importer._evaluator,
            implicit_ns_info,
            None,
            sys_path,
            import_names=import_parts,
            safe_module_name=True,
        )

        # PEP 8 idiom: `is not None` instead of `not ... is None`.
        if module is not None:
            return ContextSet(module)

    return NO_CONTEXTS
Ejemplo n.º 37
0
    def py__call__(self, arguments):
        """
        Call an overloaded function: the first overload whose signature
        matches wins; otherwise the inferences of all overloads are merged
        (or nothing, in analysis mode, where precision matters).
        """
        debug.dbg("Execute overloaded function %s", self._wrapped_context, color='BLUE')
        function_executions = []
        for f in self._overloaded_functions:
            function_execution = f.get_function_execution(arguments)
            function_executions.append(function_execution)
            if function_execution.matches_signature():
                # First matching overload wins.
                # (Removed dead code: the original set `matched = True` right
                # before this unconditional return, making a later
                # `if matched: return context_set` branch unreachable.)
                return function_execution.infer()

        if self.evaluator.is_analysis:
            # In this case we want precision.
            return NO_CONTEXTS
        # No overload matched; merge the results of all of them.
        return ContextSet.from_sets(fe.infer() for fe in function_executions)
Ejemplo n.º 38
0
def check_tuple_assignments(evaluator, contextualized_name, context_set):
    """
    Follow tuple-unpacking assignments down to the element selected by
    each assignment index, narrowing ``context_set`` step by step.
    """
    lazy_context = None
    for index, node in contextualized_name.assignment_indexes():
        cn = ContextualizedNode(contextualized_name.context, node)
        iterator = context_set.iterate(cn)
        for _ in range(index + 1):
            try:
                lazy_context = next(iterator)
            except StopIteration:
                # We deliberately avoid next()'s default parameter: a huge
                # index would otherwise make this loop spin for a very long
                # time, so bail out as soon as the iterator is exhausted.
                return ContextSet()
        context_set = lazy_context.infer()
    return context_set
Ejemplo n.º 39
0
def _create(evaluator, access_handle, parent_context, *args):
    """
    Build a MixedObject pairing a compiled object with its syntax-tree
    context.  Falls back to the bare compiled object when no source node
    can be found for the access handle.
    """
    compiled_object = create_cached_compiled_object(
        evaluator,
        access_handle,
        parent_context=parent_context.compiled_object)

    result = _find_syntax_node_name(evaluator, access_handle)
    if result is None:
        # No source available; the compiled representation is all we have.
        return compiled_object

    module_node, tree_node, path, code_lines = result

    if parent_context.tree_node.get_root_node() == module_node:
        # Defined in the same module as the parent; reuse its root context.
        module_context = parent_context.get_root_context()
    else:
        # TODO this __name__ is probably wrong.
        name = compiled_object.get_root_context().py__name__()
        string_names = tuple(name.split('.'))
        module_context = ModuleContext(
            evaluator,
            module_node,
            path=path,
            string_names=string_names,
            code_lines=code_lines,
            is_package=hasattr(compiled_object, 'py__path__'),
        )
        if name is not None:
            # Cache the module so later imports resolve to the same context.
            evaluator.module_cache.add(string_names,
                                       ContextSet([module_context]))

    tree_context = module_context.create_context(tree_node,
                                                 node_is_context=True,
                                                 node_is_object=True)
    if tree_node.type == 'classdef':
        if not access_handle.is_class():
            # Is an instance, not a class.
            tree_context, = execute_evaluated(tree_context)

    return MixedObject(evaluator,
                       parent_context,
                       compiled_object,
                       tree_context=tree_context)
Ejemplo n.º 40
0
def _create(evaluator, access_handle, parent_context, *args):
    """
    Build a MixedObject pairing a compiled object with its syntax-tree
    context.  Variant where ``parent_context`` may be None, in which case a
    fresh ModuleContext is created (and cached) for the object's module.
    """
    compiled_object = create_cached_compiled_object(
        evaluator,
        access_handle,
        parent_context=parent_context and parent_context.compiled_object)

    result = _find_syntax_node_name(evaluator, access_handle)
    # TODO use stub contexts here. If we do that we probably have to care about
    # generics from stuff like `[1]`.
    if result is None:
        # No source found; expose only the compiled representation.
        return compiled_object

    module_node, tree_node, file_io, code_lines = result

    if parent_context is None:
        # TODO this __name__ is probably wrong.
        name = compiled_object.get_root_context().py__name__()
        string_names = tuple(name.split('.'))
        module_context = ModuleContext(
            evaluator,
            module_node,
            file_io=file_io,
            string_names=string_names,
            code_lines=code_lines,
            is_package=hasattr(compiled_object, 'py__path__'),
        )
        if name is not None:
            # Cache so subsequent imports reuse this module context.
            evaluator.module_cache.add(string_names,
                                       ContextSet([module_context]))
    else:
        assert parent_context.tree_node.get_root_node() == module_node
        module_context = parent_context.get_root_context()

    tree_context = module_context.create_context(tree_node,
                                                 node_is_context=True,
                                                 node_is_object=True)
    if tree_node.type == 'classdef':
        if not access_handle.is_class():
            # Is an instance, not a class.
            tree_context, = execute_evaluated(tree_context)

    return MixedObject(compiled_object, tree_context=tree_context)
Ejemplo n.º 41
0
def builtins_next(evaluator, iterators, defaults):
    """
    TODO this function is currently not used. It's a stab at implementing next
    in a different way than fake objects. This would be a bit more flexible.
    """
    # Python 2 spells the iterator protocol method 'next', Python 3 '__next__'.
    method_name = 'next' if evaluator.environment.version_info.major == 2 else '__next__'

    result = NO_CONTEXTS
    for iterator in iterators:
        if isinstance(iterator, AbstractInstanceContext):
            result = ContextSet.from_sets(
                name.infer()
                for flt in iterator.get_filters(include_self_names=True)
                for name in flt.get(method_name)
            ).execute_evaluated()
    # Fall back to the given defaults when nothing could be inferred.
    return result or defaults
Ejemplo n.º 42
0
    def _names_to_types(self, names, attribute_lookup):
        """
        Infer the contexts behind ``names``.  Falls back to
        ``__getattr__``-style lookup and to flow analysis (isinstance /
        assert narrowing) when plain inference finds nothing.
        """
        contexts = ContextSet.from_sets(name.infer() for name in names)

        debug.dbg('finder._names_to_types: %s -> %s', names, contexts)
        if not names and isinstance(self._context, AbstractInstanceContext):
            # handling __getattr__ / __getattribute__
            return self._check_getattr(self._context)

        # Add isinstance and other if/assert knowledge.
        if not contexts and isinstance(self._name, tree.Name) and \
                not isinstance(self._name_context, AbstractInstanceContext):
            flow_scope = self._name
            base_node = self._name_context.tree_node
            if base_node.type == 'comp_for':
                # Comprehensions carry no usable flow information.
                return contexts
            while True:
                # Walk outwards through enclosing flow scopes until narrowing
                # information is found or the base node is reached.
                flow_scope = get_parent_scope(flow_scope, include_flows=True)
                n = _check_flow_information(self._name_context, flow_scope,
                                            self._name, self._position)
                if n is not None:
                    return n
                if flow_scope == base_node:
                    break
        return contexts
Ejemplo n.º 43
0
 def execute_function_slots(self, names, *evaluated_args):
     """Execute each slot name with the given args and union the results."""
     result_sets = (
         name.execute_evaluated(*evaluated_args)
         for name in names
     )
     return ContextSet.from_sets(result_sets)
Ejemplo n.º 44
0
 def dict_values(self):
     """Union of the inferred contexts of every value in the wrapped dict."""
     inferred = (lazy_context.infer() for lazy_context in self._dct.values())
     return ContextSet.from_sets(inferred)
Ejemplo n.º 45
0
def eval_node(context, element):
    """
    Central dispatch: evaluate a parse-tree ``element`` within ``context``
    and return the ContextSet of possible result contexts.
    """
    debug.dbg('eval_node %s@%s', element, element.start_pos)
    evaluator = context.evaluator
    typ = element.type
    if typ in ('name', 'number', 'string', 'atom', 'strings', 'keyword'):
        return eval_atom(context, element)
    elif typ == 'lambdef':
        return ContextSet(FunctionContext(evaluator, context, element))
    elif typ == 'expr_stmt':
        return eval_expr_stmt(context, element)
    elif typ in ('power', 'atom_expr'):
        first_child = element.children[0]
        children = element.children[1:]
        had_await = False
        if first_child.type == 'keyword' and first_child.value == 'await':
            had_await = True
            first_child = children.pop(0)

        # Evaluate the base atom, then apply each trailer (call, attribute
        # access, subscript) in sequence.
        context_set = eval_atom(context, first_child)
        for trailer in children:
            if trailer == '**':  # has a power operation.
                right = context.eval_node(children[1])
                context_set = _eval_comparison(
                    evaluator,
                    context,
                    context_set,
                    trailer,
                    right
                )
                break
            context_set = eval_trailer(context, context_set, trailer)

        if had_await:
            # `await x` resolves via x.__await__() and its StopIteration value.
            await_context_set = context_set.py__getattribute__(u"__await__")
            if not await_context_set:
                debug.warning('Tried to run py__await__ on context %s', context)
            context_set = ContextSet()
            return _py__stop_iteration_returns(await_context_set.execute_evaluated())
        return context_set
    elif typ in ('testlist_star_expr', 'testlist',):
        # The implicit tuple in statements.
        return ContextSet(iterable.SequenceLiteralContext(evaluator, context, element))
    elif typ in ('not_test', 'factor'):
        # Unary operators: apply each prefix operator to the operand result.
        context_set = context.eval_node(element.children[-1])
        for operator in element.children[:-1]:
            context_set = eval_factor(context_set, operator)
        return context_set
    elif typ == 'test':
        # `x if foo else y` case.
        return (context.eval_node(element.children[0]) |
                context.eval_node(element.children[-1]))
    elif typ == 'operator':
        # Must be an ellipsis, other operators are not evaluated.
        # In Python 2 ellipsis is coded as three single dot tokens, not
        # as one token 3 dot token.
        if element.value not in ('.', '...'):
            origin = element.parent
            raise AssertionError("unhandled operator %s in %s " % (repr(element.value), origin))
        return ContextSet(compiled.builtin_from_name(evaluator, u'Ellipsis'))
    elif typ == 'dotted_name':
        # a.b.c style names: resolve attribute by attribute.
        context_set = eval_atom(context, element.children[0])
        for next_name in element.children[2::2]:
            # TODO add search_global=True?
            context_set = context_set.py__getattribute__(next_name, name_context=context)
        return context_set
    elif typ == 'eval_input':
        return eval_node(context, element.children[0])
    elif typ == 'annassign':
        # Variable annotation (`x: int = ...`): evaluate the annotation.
        return pep0484._evaluate_for_annotation(context, element.children[1])
    elif typ == 'yield_expr':
        if len(element.children) and element.children[1].type == 'yield_arg':
            # Implies that it's a yield from.
            element = element.children[1].children[1]
            generators = context.eval_node(element)
            return _py__stop_iteration_returns(generators)

        # Generator.send() is not implemented.
        return NO_CONTEXTS
    else:
        return eval_or_test(context, element)
Ejemplo n.º 46
0
 def _values(self):
     """Returns a list of a list of node."""
     if self.array_type != u'dict':
         return self._items()
     # For dicts, drop the keys and union the value sets.
     return ContextSet.from_sets(value for _key, value in self._items())
Ejemplo n.º 47
0
 def infer(self):
     """Union of the inferred contexts of every executed param."""
     inferred = (param.infer() for param in self._executed_params)
     return ContextSet.from_sets(inferred)
Ejemplo n.º 48
0
 def eval_docstring(docstring):
     """Infer param contexts from type descriptions found in *docstring*."""
     inferred = (
         result
         for param_str in _search_param_in_docstr(docstring, param.name.value)
         for result in _evaluate_for_statement_string(module_context, param_str)
     )
     return ContextSet.from_iterable(inferred)
Ejemplo n.º 49
0
 def dict_values(self):
     """Union of the value context sets produced while iterating the dict."""
     return ContextSet.from_sets(value_set for _keys, value_set in self._iterate())
Ejemplo n.º 50
0
 def dict_values(self):
     """Wrap each access-handle dict value in a compiled context."""
     contexts = [
         create_from_access_path(self.evaluator, access)
         for access in self.access_handle.dict_values()
     ]
     return ContextSet.from_iterable(contexts)
Ejemplo n.º 51
0
 def dict_values(self):
     """Evaluate every value node of the dict literal and union the results."""
     evaluated = (
         self._defining_context.eval_node(value_node)
         for _key, value_node in self._items()
     )
     return ContextSet.from_sets(evaluated)
Ejemplo n.º 52
0
 def py__next__(self):
     """Each iteration step may yield any of the iterated contexts."""
     inferred = (lazy.infer() for lazy in self.py__iter__())
     return ContextSet.from_sets(inferred)
Ejemplo n.º 53
0
 def execute(arguments):
     """Execute every name with *arguments* and union the results."""
     result_sets = (name.execute(arguments) for name in names)
     return ContextSet.from_sets(result_sets)
Ejemplo n.º 54
0
 def infer(self):
     """Union of the inferred contexts of all lazy contexts in self.data."""
     inferred = (lazy_context.infer() for lazy_context in self.data)
     return ContextSet.from_sets(inferred)
Ejemplo n.º 55
0
 def py__getitem__(self, index):
     """Item access may produce any iterated context; *index* is ignored."""
     inferred = (lazy.infer() for lazy in self.py__iter__())
     return ContextSet.from_sets(inferred)
Ejemplo n.º 56
0
 def py__next__(self):
     """Each iteration step may yield any of the iterated contexts."""
     # TODO add TypeError if params are given.
     inferred = (lazy.infer() for lazy in self.py__iter__())
     return ContextSet.from_sets(inferred)