Example #1
def _name_to_types(evaluator, context, tree_name):
    types = []
    node = tree_name.get_definition(import_name_always=True)
    if node is None:
        node = tree_name.parent
        if node.type == 'global_stmt':
            context = evaluator.create_context(context, tree_name)
            finder = NameFinder(evaluator, context, context, tree_name.value)
            filters = finder.get_filters(search_global=True)
            # For global_stmt lookups, we only need the first possible scope,
            # which means the function itself.
            filters = [next(filters)]
            return finder.find(filters, attribute_lookup=False)
        elif node.type not in ('import_from', 'import_name'):
            raise ValueError("Should not happen.")

    typ = node.type
    if typ == 'for_stmt':
        types = pep0484.find_type_from_comment_hint_for(
            context, node, tree_name)
        if types:
            return types
    if typ == 'with_stmt':
        types = pep0484.find_type_from_comment_hint_with(
            context, node, tree_name)
        if types:
            return types
    if typ in ('for_stmt', 'comp_for'):
        try:
            types = context.predefined_names[node][tree_name.value]
        except KeyError:
            cn = ContextualizedNode(context, node.children[3])
            for_types = iterable.py__iter__types(evaluator, cn.infer(), cn)
            c_node = ContextualizedName(context, tree_name)
            types = check_tuple_assignments(evaluator, c_node, for_types)
    elif typ == 'expr_stmt':
        types = _remove_statements(evaluator, context, node, tree_name)
    elif typ == 'with_stmt':
        context_managers = context.eval_node(
            node.get_test_node_from_name(tree_name))
        enter_methods = unite(
            context_manager.py__getattribute__('__enter__')
            for context_manager in context_managers)
        types = unite(method.execute_evaluated() for method in enter_methods)
    elif typ in ('import_from', 'import_name'):
        types = imports.infer_import(context, tree_name)
    elif typ in ('funcdef', 'classdef'):
        types = _apply_decorators(evaluator, context, node)
    elif typ == 'try_stmt':
        # TODO an exception can also be a tuple. Check for those.
        # TODO check for types that are not classes and add it to
        # the static analysis report.
        exceptions = context.eval_node(
            tree_name.get_previous_sibling().get_previous_sibling())
        types = unite(
            evaluator.execute(t, param.ValuesArguments([]))
            for t in exceptions)
    else:
        raise ValueError("Should not happen.")
    return types
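For orientation: every example on this page funnels inferred results through jedi's `unite` helper. The snippet below is a minimal, assumed sketch of what such a helper does (flatten an iterable of sets into one set), not jedi's actual source, but it is enough to read the examples that follow.

def unite(iterable):
    """Flatten an iterable of iterables (typically sets of inferred types) into one set."""
    return set(item for group in iterable for item in group)

# The recurring pattern in the examples below:
#     types = unite(name.infer() for name in names)
print(unite([{1, 2}, {2, 3}, set()]))  # {1, 2, 3}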
Example #2
def _name_to_types(evaluator, context, tree_name):
    types = []
    node = tree_name.get_definition(import_name_always=True)
    if node is None:
        node = tree_name.parent
        if node.type == 'global_stmt':
            context = evaluator.create_context(context, tree_name)
            finder = NameFinder(evaluator, context, context, tree_name.value)
            filters = finder.get_filters(search_global=True)
            # For global_stmt lookups, we only need the first possible scope,
            # which means the function itself.
            filters = [next(filters)]
            return finder.find(filters, attribute_lookup=False)
        elif node.type not in ('import_from', 'import_name'):
            raise ValueError("Should not happen.")

    typ = node.type
    if typ == 'for_stmt':
        types = pep0484.find_type_from_comment_hint_for(context, node, tree_name)
        if types:
            return types
    if typ == 'with_stmt':
        types = pep0484.find_type_from_comment_hint_with(context, node, tree_name)
        if types:
            return types
    if typ in ('for_stmt', 'comp_for'):
        try:
            types = context.predefined_names[node][tree_name.value]
        except KeyError:
            cn = ContextualizedNode(context, node.children[3])
            for_types = iterable.py__iter__types(evaluator, cn.infer(), cn)
            c_node = ContextualizedName(context, tree_name)
            types = check_tuple_assignments(evaluator, c_node, for_types)
    elif typ == 'expr_stmt':
        types = _remove_statements(evaluator, context, node, tree_name)
    elif typ == 'with_stmt':
        context_managers = context.eval_node(node.get_test_node_from_name(tree_name))
        enter_methods = unite(
            context_manager.py__getattribute__('__enter__')
            for context_manager in context_managers
        )
        types = unite(method.execute_evaluated() for method in enter_methods)
    elif typ in ('import_from', 'import_name'):
        types = imports.infer_import(context, tree_name)
    elif typ in ('funcdef', 'classdef'):
        types = _apply_decorators(evaluator, context, node)
    elif typ == 'try_stmt':
        # TODO an exception can also be a tuple. Check for those.
        # TODO check for types that are not classes and add it to
        # the static analysis report.
        exceptions = context.eval_node(tree_name.get_previous_sibling().get_previous_sibling())
        types = unite(
            evaluator.execute(t, param.ValuesArguments([]))
            for t in exceptions
        )
    else:
        raise ValueError("Should not happen.")
    return types
Example #3
def py__iter__types(evaluator, types, node=None):
    """
    Calls `py__iter__`, but ignores the ordering in the end and just returns
    all types that it contains.
    """
    return unite(lazy_context.infer()
                 for lazy_context in py__iter__(evaluator, types, node))
Example #4
def builtins_super(evaluator, types, objects, context):
    # TODO make this able to detect multiple inheritance super
    if isinstance(context, (InstanceFunctionExecution,
                            AnonymousInstanceFunctionExecution)):
        su = context.instance.py__class__().py__bases__()
        return unite(context.execute_evaluated() for context in su[0].infer())
    return set()
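The TODO above points out that only the first base class is followed. For contrast, a plain-Python reminder (no jedi APIs) of how super() actually walks the whole MRO, which is what a full implementation would have to model:

class A:
    def hello(self):
        return 'A'

class Mixin:
    def hello(self):
        return 'Mixin->' + super().hello()

class B(Mixin, A):
    def hello(self):
        return 'B->' + super().hello()

print(B().hello())  # B->Mixin->A
print(B.__mro__)    # (B, Mixin, A, object): super() follows this order, not just the first base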
Example #5
def _check_isinstance_type(evaluator, element, search_name):
    try:
        assert element.type in ('power', 'atom_expr')
        # this might be removed if we analyze and, etc
        assert len(element.children) == 2
        first, trailer = element.children
        assert isinstance(first, tree.Name) and first.value == 'isinstance'
        assert trailer.type == 'trailer' and trailer.children[0] == '('
        assert len(trailer.children) == 3

        # arglist stuff
        arglist = trailer.children[1]
        args = param.Arguments(evaluator, arglist, trailer)
        lst = list(args.unpack())
        # Disallow keyword arguments
        assert len(lst) == 2 and lst[0][0] is None and lst[1][0] is None
        name = lst[0][1][0]  # first argument, values, first value
        # Do a simple get_code comparison. They should just have the same code,
        # and everything will be all right.
        classes = lst[1][1][0]
        call = helpers.call_of_leaf(search_name)
        assert name.get_code(normalized=True) == call.get_code(normalized=True)
    except AssertionError:
        return set()

    result = set()
    for cls_or_tup in evaluator.eval_element(classes):
        if isinstance(cls_or_tup,
                      iterable.Array) and cls_or_tup.type == 'tuple':
            for typ in unite(cls_or_tup.py__iter__()):
                result |= evaluator.execute(typ)
        else:
            result |= evaluator.execute(cls_or_tup)
    return result
Example #6
    def _names_to_types(self, names, attribute_lookup):
        types = set()

        types = unite(name.infer() for name in names)

        debug.dbg('finder._names_to_types: %s -> %s', names, types)
        if not names and isinstance(self._context, AbstractInstanceContext):
            # handling __getattr__ / __getattribute__
            return self._check_getattr(self._context)

        # Add isinstance and other if/assert knowledge.
        if not types and isinstance(self._name, tree.Name) and \
                not isinstance(self._name_context, AbstractInstanceContext):
            flow_scope = self._name
            base_node = self._name_context.tree_node
            if base_node.type == 'comp_for':
                return types
            while True:
                flow_scope = get_parent_scope(flow_scope, include_flows=True)
                n = _check_flow_information(self._name_context, flow_scope,
                                            self._name, self._position)
                if n is not None:
                    return n
                if flow_scope == base_node:
                    break
        return types
Example #7
    def check_try_for_except(obj, exception):
        # Only nodes in try
        iterator = iter(obj.children)
        for branch_type in iterator:
            colon = next(iterator)
            suite = next(iterator)
            if branch_type == 'try' \
                    and not (branch_type.start_pos < jedi_obj.start_pos <= suite.end_pos):
                return False

        for node in obj.except_clauses():
            if node is None:
                return True  # An exception block that catches everything.
            else:
                except_classes = evaluator.eval_element(node)
                for cls in except_classes:
                    from jedi.evaluate import iterable
                    if isinstance(cls, iterable.Array) and cls.type == 'tuple':
                        # multiple exceptions
                        for typ in unite(cls.py__iter__()):
                            if check_match(typ, exception):
                                return True
                    else:
                        if check_match(cls, exception):
                            return True
Example #8
def _follow_param(evaluator, arguments, index):
    try:
        key, values = list(arguments.unpack())[index]
    except IndexError:
        return set()
    else:
        return unite(evaluator.eval_element(v) for v in values)
Example #9
def _follow_param(evaluator, arguments, index):
    try:
        key, values = list(arguments.unpack())[index]
    except IndexError:
        return set()
    else:
        return unite(evaluator.eval_element(v) for v in values)
Example #10
def builtins_isinstance(evaluator, objects, types, arguments):
    bool_results = set([])
    for o in objects:
        try:
            mro_func = o.py__class__().py__mro__
        except AttributeError:
            # This is temporary. Everything should have a class attribute in
            # Python?! Maybe we'll leave it here, because some numpy objects or
            # whatever might not.
            return set([compiled.create(True), compiled.create(False)])

        mro = mro_func()

        for cls_or_tup in types:
            if cls_or_tup.is_class():
                bool_results.add(cls_or_tup in mro)
            elif cls_or_tup.name.string_name == 'tuple' \
                    and cls_or_tup.get_root_context() == evaluator.BUILTINS:
                # Check for tuples.
                classes = unite(
                    lazy_context.infer()
                    for lazy_context in cls_or_tup.py__iter__()
                )
                bool_results.add(any(cls in mro for cls in classes))
            else:
                _, lazy_context = list(arguments.unpack())[1]
                if isinstance(lazy_context, LazyTreeContext):
                    node = lazy_context.data
                    message = 'TypeError: isinstance() arg 2 must be a ' \
                              'class, type, or tuple of classes and types, ' \
                              'not %s.' % cls_or_tup
                    analysis.add(lazy_context._context, 'type-error-isinstance', node, message)

    return set(compiled.create(evaluator, x) for x in bool_results)
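As a reference for what this handler models: the real builtin accepts either a single class or a tuple of classes as its second argument, and anything else raises the TypeError reported above (plain Python, no jedi involved):

print(isinstance(3, int))         # True
print(isinstance(3, (str, int)))  # True: any class in the tuple may match
try:
    isinstance(3, 42)
except TypeError as exc:
    print(exc)  # "isinstance() arg 2 must be a type..." (exact wording varies by Python version)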
Example #11
    def check_try_for_except(obj, exception):
        # Only nodes in try
        iterator = iter(obj.children)
        for branch_type in iterator:
            colon = next(iterator)
            suite = next(iterator)
            if branch_type == 'try' \
                    and not (branch_type.start_pos < jedi_obj.start_pos <= suite.end_pos):
                return False

        for node in obj.except_clauses():
            if node is None:
                return True  # An exception block that catches everything.
            else:
                except_classes = evaluator.eval_element(node)
                for cls in except_classes:
                    from jedi.evaluate import iterable
                    if isinstance(cls, iterable.Array) and cls.type == 'tuple':
                        # multiple exceptions
                        for typ in unite(cls.py__iter__()):
                            if check_match(typ, exception):
                                return True
                    else:
                        if check_match(cls, exception):
                            return True
Example #12
def _check_isinstance_type(evaluator, element, search_name):
    try:
        assert element.type in ("power", "atom_expr")
        # this might be removed if we analyze and, etc
        assert len(element.children) == 2
        first, trailer = element.children
        assert isinstance(first, tree.Name) and first.value == "isinstance"
        assert trailer.type == "trailer" and trailer.children[0] == "("
        assert len(trailer.children) == 3

        # arglist stuff
        arglist = trailer.children[1]
        args = param.Arguments(evaluator, arglist, trailer)
        lst = list(args.unpack())
        # Disallow keyword arguments
        assert len(lst) == 2 and lst[0][0] is None and lst[1][0] is None
        name = lst[0][1][0]  # first argument, values, first value
        # Do a simple get_code comparison. They should just have the same code,
        # and everything will be all right.
        classes = lst[1][1][0]
        call = helpers.call_of_leaf(search_name)
        assert name.get_code(normalized=True) == call.get_code(normalized=True)
    except AssertionError:
        return set()

    result = set()
    for cls_or_tup in evaluator.eval_element(classes):
        if isinstance(cls_or_tup, iterable.Array) and cls_or_tup.type == "tuple":
            for typ in unite(cls_or_tup.py__iter__()):
                result |= evaluator.execute(typ)
        else:
            result |= evaluator.execute(cls_or_tup)
    return result
Example #13
def builtins_isinstance(evaluator, objects, types, arguments):
    bool_results = set([])
    for o in objects:
        try:
            mro_func = o.py__class__().py__mro__
        except AttributeError:
            # This is temporary. Everything should have a class attribute in
            # Python?! Maybe we'll leave it here, because some numpy objects or
            # whatever might not.
            return set([compiled.create(True), compiled.create(False)])

        mro = mro_func()

        for cls_or_tup in types:
            if cls_or_tup.is_class():
                bool_results.add(cls_or_tup in mro)
            elif cls_or_tup.name.string_name == 'tuple' \
                    and cls_or_tup.get_root_context() == evaluator.BUILTINS:
                # Check for tuples.
                classes = unite(lazy_context.infer()
                                for lazy_context in cls_or_tup.py__iter__())
                bool_results.add(any(cls in mro for cls in classes))
            else:
                _, lazy_context = list(arguments.unpack())[1]
                if isinstance(lazy_context, LazyTreeContext):
                    node = lazy_context.data
                    message = 'TypeError: isinstance() arg 2 must be a ' \
                              'class, type, or tuple of classes and types, ' \
                              'not %s.' % cls_or_tup
                    analysis.add(lazy_context._context,
                                 'type-error-isinstance', node, message)

    return set(compiled.create(evaluator, x) for x in bool_results)
Example #14
    def _names_to_types(self, names, attribute_lookup):
        types = set()

        types = unite(name.infer() for name in names)

        debug.dbg('finder._names_to_types: %s -> %s', names, types)
        if not names and isinstance(self._context, AbstractInstanceContext):
            # handling __getattr__ / __getattribute__
            return self._check_getattr(self._context)

        # Add isinstance and other if/assert knowledge.
        if not types and isinstance(self._name, tree.Name) and \
                not isinstance(self._name_context, AbstractInstanceContext):
            flow_scope = self._name
            base_node = self._name_context.tree_node
            if base_node.type == 'comp_for':
                return types
            while True:
                flow_scope = get_parent_scope(flow_scope, include_flows=True)
                n = _check_flow_information(self._name_context, flow_scope,
                                            self._name, self._position)
                if n is not None:
                    return n
                if flow_scope == base_node:
                    break
        return types
Example #15
 def _eval_element_not_cached(self, context, element):
     debug.dbg('eval_element %s@%s', element, element.start_pos)
     types = set()
     typ = element.type
     if typ in ('name', 'number', 'string', 'atom'):
         types = self.eval_atom(context, element)
     elif typ == 'keyword':
         # For False/True/None
         if element.value in ('False', 'True', 'None'):
             types.add(compiled.builtin_from_name(self, element.value))
         # else: print e.g. could be evaluated like this in Python 2.7
     elif typ == 'lambdef':
         types = set([er.FunctionContext(self, context, element)])
     elif typ == 'expr_stmt':
         types = self.eval_statement(context, element)
     elif typ in ('power', 'atom_expr'):
         first_child = element.children[0]
         if not (first_child.type == 'keyword' and first_child.value == 'await'):
             types = self.eval_atom(context, first_child)
             for trailer in element.children[1:]:
                 if trailer == '**':  # has a power operation.
                     right = self.eval_element(context, element.children[2])
                     types = set(precedence.calculate(self, context, types, trailer, right))
                     break
                 types = self.eval_trailer(context, types, trailer)
     elif typ in ('testlist_star_expr', 'testlist',):
         # The implicit tuple in statements.
         types = set([iterable.SequenceLiteralContext(self, context, element)])
     elif typ in ('not_test', 'factor'):
         types = self.eval_element(context, element.children[-1])
         for operator in element.children[:-1]:
             types = set(precedence.factor_calculate(self, types, operator))
     elif typ == 'test':
         # `x if foo else y` case.
         types = (self.eval_element(context, element.children[0]) |
                  self.eval_element(context, element.children[-1]))
     elif typ == 'operator':
         # Must be an ellipsis, other operators are not evaluated.
         # In Python 2, the ellipsis is coded as three single-dot tokens,
         # not as one three-dot token.
         assert element.value in ('.', '...')
         types = set([compiled.create(self, Ellipsis)])
     elif typ == 'dotted_name':
         types = self.eval_atom(context, element.children[0])
         for next_name in element.children[2::2]:
             # TODO add search_global=True?
             types = unite(
                 typ.py__getattribute__(next_name, name_context=context)
                 for typ in types
             )
         types = types
     elif typ == 'eval_input':
         types = self._eval_element_not_cached(context, element.children[0])
     elif typ == 'annassign':
         types = pep0484._evaluate_for_annotation(context, element.children[1])
     else:
         types = precedence.calculate_children(self, context, element.children)
     debug.dbg('eval_element result %s', types)
     return types
Example #16
 def py__getitem__(self, index):
     all_types = list(self.py__iter__())
     result = all_types[index]
     if isinstance(index, slice):
         return create_evaluated_sequence_set(self._evaluator,
                                              unite(result),
                                              sequence_type='list')
     return result
Example #17
def _execute_types_in_stmt(module_context, stmt):
    """
    Executes all types or general elements that we find in a statement. This
    doesn't include tuple, list and dict literals, because the values they
    contain are executed. (Used as type information.)
    """
    definitions = module_context.eval_node(stmt)
    return unite(_execute_array_values(module_context.evaluator, d) for d in definitions)
Example #18
def _execute_types_in_stmt(module_context, stmt):
    """
    Executes all types or general elements that we find in a statement. This
    doesn't include tuple, list and dict literals, because the values they
    contain are executed. (Used as type information.)
    """
    definitions = module_context.eval_node(stmt)
    return unite(_execute_array_values(module_context.evaluator, d) for d in definitions)
Example #19
def builtins_super(evaluator, types, objects, context):
    # TODO make this able to detect multiple inheritance super
    if isinstance(
            context,
        (InstanceFunctionExecution, AnonymousInstanceFunctionExecution)):
        su = context.instance.py__class__().py__bases__()
        return unite(context.execute_evaluated() for context in su[0].infer())
    return set()
Example #20
def py__iter__types(evaluator, types, contextualized_node=None):
    """
    Calls `py__iter__`, but ignores the ordering in the end and just returns
    all types that it contains.
    """
    return unite(
        lazy_context.infer()
        for lazy_context in py__iter__(evaluator, types, contextualized_node)
    )
Example #21
 def py__getitem__(self, index):
     all_types = list(self.py__iter__())
     result = all_types[index]
     if isinstance(index, slice):
         return create_evaluated_sequence_set(
             self._evaluator,
             unite(result),
             sequence_type='list'
         )
     return result
Example #22
    def defined_names(self):
        """
        List sub-definitions (e.g., methods in class).

        :rtype: list of Definition
        """
        defs = self._name.infer()
        return sorted(common.unite(
            defined_names(self._evaluator, d) for d in defs),
                      key=lambda s: s._name.start_pos or (0, 0))
Example #23
    def defined_names(self):
        """
        List sub-definitions (e.g., methods in class).

        :rtype: list of Definition
        """
        defs = self._name.infer()
        return sorted(
            common.unite(defined_names(self._evaluator, d) for d in defs),
            key=lambda s: s._name.start_pos or (0, 0)
        )
Example #24
def py__getitem__(context, typ, node):
    if not typ.get_root_context().name.string_name == "typing":
        return None
    # we assume that any class using [] in a module called
    # "typing" with a name for which we have a replacement
    # should be replaced by that class. This is not 100%
    # airtight but I don't have a better idea to check that it's
    # actually the PEP-0484 typing module and not some other module.
    if node.type == "subscriptlist":
        nodes = node.children[::2]  # skip the commas
    else:
        nodes = [node]
    del node

    nodes = [_fix_forward_reference(context, node) for node in nodes]
    type_name = typ.name.string_name

    # hacked in Union and Optional, since it's hard to do nicely in parsed code
    if type_name in ("Union", '_Union'):
        # In Python 3.6 it's still called typing.Union but it's an instance
        # called _Union.
        return unite(context.eval_node(node) for node in nodes)
    if type_name in ("Optional", '_Optional'):
    # Here we have the same issue as in Union. Therefore we also need to
        # check for the instance typing._Optional (Python 3.6).
        return context.eval_node(nodes[0])

    from jedi.evaluate.representation import ModuleContext
    typing = ModuleContext(
        context.evaluator,
        module_node=_get_typing_replacement_module(),
        path=None
    )
    factories = typing.py__getattribute__("factory")
    assert len(factories) == 1
    factory = list(factories)[0]
    assert factory
    function_body_nodes = factory.tree_node.children[4].children
    valid_classnames = set(child.name.value
                           for child in function_body_nodes
                           if isinstance(child, tree.Class))
    if type_name not in valid_classnames:
        return None
    compiled_classname = compiled.create(context.evaluator, type_name)

    from jedi.evaluate.iterable import FakeSequence
    args = FakeSequence(
        context.evaluator,
        "tuple",
        [LazyTreeContext(context, n) for n in nodes]
    )

    result = factory.execute_evaluated(compiled_classname, args)
    return result
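For context, the subscripted typing constructs that this hook special-cases look like this at the user level (plain typing module usage, nothing jedi-specific):

from typing import List, Optional, Union

def convert(x: Union[int, str], extra: Optional[List[int]] = None) -> List[str]:
    return [str(x)] + [str(n) for n in (extra or [])]

print(convert(3, [1, 2]))  # ['3', '1', '2']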
Example #25
def py__getitem__(context, typ, node):
    if not typ.get_root_context().name.string_name == "typing":
        return None
    # we assume that any class using [] in a module called
    # "typing" with a name for which we have a replacement
    # should be replaced by that class. This is not 100%
    # airtight but I don't have a better idea to check that it's
    # actually the PEP-0484 typing module and not some other module.
    if node.type == "subscriptlist":
        nodes = node.children[::2]  # skip the commas
    else:
        nodes = [node]
    del node

    nodes = [_fix_forward_reference(context, node) for node in nodes]
    type_name = typ.name.string_name

    # hacked in Union and Optional, since it's hard to do nicely in parsed code
    if type_name in ("Union", '_Union'):
        # In Python 3.6 it's still called typing.Union but it's an instance
        # called _Union.
        return unite(context.eval_node(node) for node in nodes)
    if type_name in ("Optional", '_Optional'):
    # Here we have the same issue as in Union. Therefore we also need to
        # check for the instance typing._Optional (Python 3.6).
        return context.eval_node(nodes[0])

    from jedi.evaluate.representation import ModuleContext
    typing = ModuleContext(
        context.evaluator,
        module_node=_get_typing_replacement_module(context.evaluator.latest_grammar),
        path=None
    )
    factories = typing.py__getattribute__("factory")
    assert len(factories) == 1
    factory = list(factories)[0]
    assert factory
    function_body_nodes = factory.tree_node.children[4].children
    valid_classnames = set(child.name.value
                           for child in function_body_nodes
                           if isinstance(child, tree.Class))
    if type_name not in valid_classnames:
        return None
    compiled_classname = compiled.create(context.evaluator, type_name)

    from jedi.evaluate.iterable import FakeSequence
    args = FakeSequence(
        context.evaluator,
        "tuple",
        [LazyTreeContext(context, n) for n in nodes]
    )

    result = factory.execute_evaluated(compiled_classname, args)
    return result
Example #26
def infer_import(context, tree_name, is_goto=False):
    module_context = context.get_root_context()
    import_node = search_ancestor(tree_name, 'import_name', 'import_from')
    import_path = import_node.get_path_for_name(tree_name)
    from_import_name = None
    evaluator = context.evaluator
    try:
        from_names = import_node.get_from_names()
    except AttributeError:
        # Is an import_name
        pass
    else:
        if len(from_names) + 1 == len(import_path):
            # We have to fetch the from_names part first and then check
            # if from_names exists in the modules.
            from_import_name = import_path[-1]
            import_path = from_names

    importer = Importer(evaluator, tuple(import_path),
                        module_context, import_node.level)

    types = importer.follow()

    #if import_node.is_nested() and not self.nested_resolve:
    #    scopes = [NestedImportModule(module, import_node)]

    if not types:
        return set()

    if from_import_name is not None:
        types = unite(
            t.py__getattribute__(
                from_import_name,
                name_context=context,
                is_goto=is_goto,
                analysis_errors=False
            ) for t in types
        )

        if not types:
            path = import_path + [from_import_name]
            importer = Importer(evaluator, tuple(path),
                                module_context, import_node.level)
            types = importer.follow()
            # goto only accepts `Name`
            if is_goto:
                types = set(s.name for s in types)
    else:
        # goto only accepts `Name`
        if is_goto:
            types = set(s.name for s in types)

    debug.dbg('after import: %s', types)
    return types
Example #27
def _execute_array_values(evaluator, array):
    """
    Tuples indicate that there's not just one return value, but the listed
    ones.  `(str, int)` means that it returns a tuple with both types.
    """
    if isinstance(array, SequenceLiteralContext):
        values = []
        for lazy_context in array.py__iter__():
            objects = unite(_execute_array_values(evaluator, typ) for typ in lazy_context.infer())
            values.append(context.LazyKnownContexts(objects))
        return set([FakeSequence(evaluator, array.array_type, values)])
    else:
        return array.execute_evaluated()
Example #28
def _execute_array_values(evaluator, array):
    """
    Tuples indicate that there's not just one return value, but the listed
    ones.  `(str, int)` means that it returns a tuple with both types.
    """
    if isinstance(array, SequenceLiteralContext):
        values = []
        for lazy_context in array.py__iter__():
            objects = unite(_execute_array_values(evaluator, typ) for typ in lazy_context.infer())
            values.append(context.LazyKnownContexts(objects))
        return set([FakeSequence(evaluator, array.array_type, values)])
    else:
        return array.execute_evaluated()
Example #29
def infer_import(context, tree_name, is_goto=False):
    module_context = context.get_root_context()
    import_node = search_ancestor(tree_name, 'import_name', 'import_from')
    import_path = import_node.get_path_for_name(tree_name)
    from_import_name = None
    evaluator = context.evaluator
    try:
        from_names = import_node.get_from_names()
    except AttributeError:
        # Is an import_name
        pass
    else:
        if len(from_names) + 1 == len(import_path):
            # We have to fetch the from_names part first and then check
            # if from_names exists in the modules.
            from_import_name = import_path[-1]
            import_path = from_names

    importer = Importer(evaluator, tuple(import_path), module_context,
                        import_node.level)

    types = importer.follow()

    #if import_node.is_nested() and not self.nested_resolve:
    #    scopes = [NestedImportModule(module, import_node)]

    if not types:
        return set()

    if from_import_name is not None:
        types = unite(
            t.py__getattribute__(from_import_name,
                                 name_context=context,
                                 is_goto=is_goto,
                                 analysis_errors=False) for t in types)

        if not types:
            path = import_path + [from_import_name]
            importer = Importer(evaluator, tuple(path), module_context,
                                import_node.level)
            types = importer.follow()
            # goto only accepts `Name`
            if is_goto:
                types = set(s.name for s in types)
    else:
        # goto only accepts `Name`
        if is_goto:
            types = set(s.name for s in types)

    debug.dbg('after import: %s', types)
    return types
Example #30
 def check_additions(arglist, add_name):
     params = list(param.Arguments(evaluator, arglist).unpack())
     result = set()
     if add_name in ['insert']:
         params = params[1:]
     if add_name in ['append', 'add', 'insert']:
         for key, nodes in params:
             result |= unite(evaluator.eval_element(node) for node in nodes)
     elif add_name in ['extend', 'update']:
         for key, nodes in params:
             for node in nodes:
                 types = evaluator.eval_element(node)
                 result |= py__iter__types(evaluator, types, node)
     return result
Example #31
 def check_additions(arglist, add_name):
     params = list(param.Arguments(evaluator, arglist).unpack())
     result = set()
     if add_name in ['insert']:
         params = params[1:]
     if add_name in ['append', 'add', 'insert']:
         for key, nodes in params:
             result |= unite(evaluator.eval_element(node) for node in nodes)
     elif add_name in ['extend', 'update']:
         for key, nodes in params:
             for node in nodes:
                 types = evaluator.eval_element(node)
                 result |= py__iter__types(evaluator, types, node)
     return result
Example #32
def _name_to_types(evaluator, context, tree_name):
    types = []
    node = tree_name.get_definition()
    if node.type == 'for_stmt':
        types = pep0484.find_type_from_comment_hint_for(
            context, node, tree_name)
        if types:
            return types
    if node.type == 'with_stmt':
        types = pep0484.find_type_from_comment_hint_with(
            context, node, tree_name)
        if types:
            return types
    if node.type in ('for_stmt', 'comp_for'):
        try:
            types = context.predefined_names[node][tree_name.value]
        except KeyError:
            container_types = context.eval_node(node.children[3])
            for_types = iterable.py__iter__types(evaluator, container_types,
                                                 node.children[3])
            types = check_tuple_assignments(evaluator, for_types, tree_name)
    elif node.type == 'expr_stmt':
        types = _remove_statements(evaluator, context, node, tree_name)
    elif node.type == 'with_stmt':
        types = context.eval_node(node.node_from_name(tree_name))
    elif isinstance(node, tree.Import):
        types = imports.infer_import(context, tree_name)
    elif node.type in ('funcdef', 'classdef'):
        types = _apply_decorators(evaluator, context, node)
    elif node.type == 'global_stmt':
        context = evaluator.create_context(context, tree_name)
        finder = NameFinder(evaluator, context, context, str(tree_name))
        filters = finder.get_filters(search_global=True)
        # For global_stmt lookups, we only need the first possible scope,
        # which means the function itself.
        filters = [next(filters)]
        types += finder.find(filters, attribute_lookup=False)
    elif isinstance(node, tree.TryStmt):
        # TODO an exception can also be a tuple. Check for those.
        # TODO check for types that are not classes and add it to
        # the static analysis report.
        exceptions = context.eval_node(
            tree_name.get_previous_sibling().get_previous_sibling())
        types = unite(
            evaluator.execute(t, param.ValuesArguments([]))
            for t in exceptions)
    else:
        raise ValueError("Should not happen.")
    return types
Example #33
    def py__iter__(self):
        try:
            _, first_nodes = next(self.var_args.unpack())
        except StopIteration:
            types = set()
        else:
            types = unite(self._evaluator.eval_element(node) for node in first_nodes)
            for types in py__iter__(self._evaluator, types, first_nodes[0]):
                yield types

        module = self.var_args.get_parent_until()
        is_list = str(self.instance.name) == 'list'
        additions = _check_array_additions(self._evaluator, self.instance, module, is_list)
        if additions:
            yield additions
Example #34
def py__iter__(evaluator, types, node=None):
    debug.dbg('py__iter__')
    type_iters = []
    for typ in types:
        try:
            iter_method = typ.py__iter__
        except AttributeError:
            if node is not None:
                analysis.add(evaluator, 'type-error-not-iterable', node,
                             message="TypeError: '%s' object is not iterable" % typ)
        else:
            type_iters.append(iter_method())
            #for result in iter_method():
                #yield result

    for t in zip_longest(*type_iters, fillvalue=set()):
        yield unite(t)
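The zip_longest(..., fillvalue=set()) plus unite combination at the end merges the possible types position by position across all iterated containers. Below is a self-contained sketch of just that merging step; the local unite here is an assumed stand-in for jedi's helper:

from itertools import zip_longest

def unite(iterable):
    # Assumed stand-in for jedi's unite: flatten an iterable of sets into one set.
    return set(item for group in iterable for item in group)

# Two containers being iterated at once, one holding (int, str), the other (float,):
type_iters = [[{'int'}, {'str'}], [{'float'}]]

merged = [unite(entry) for entry in zip_longest(*type_iters, fillvalue=set())]
print(merged)  # [{'int', 'float'}, {'str'}] (set ordering may differ): per-position union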
Example #35
def _name_to_types(evaluator, context, tree_name):
    types = []
    node = tree_name.get_definition()
    if node.isinstance(tree.ForStmt):
        types = pep0484.find_type_from_comment_hint_for(context, node, tree_name)
        if types:
            return types
    if node.isinstance(tree.WithStmt):
        types = pep0484.find_type_from_comment_hint_with(context, node, tree_name)
        if types:
            return types
    if node.type in ('for_stmt', 'comp_for'):
        try:
            types = context.predefined_names[node][tree_name.value]
        except KeyError:
            container_types = context.eval_node(node.children[3])
            for_types = iterable.py__iter__types(evaluator, container_types, node.children[3])
            types = check_tuple_assignments(evaluator, for_types, tree_name)
    elif node.isinstance(tree.ExprStmt):
        types = _remove_statements(evaluator, context, node, tree_name)
    elif node.isinstance(tree.WithStmt):
        types = context.eval_node(node.node_from_name(tree_name))
    elif isinstance(node, tree.Import):
        types = imports.infer_import(context, tree_name)
    elif node.type in ('funcdef', 'classdef'):
        types = _apply_decorators(evaluator, context, node)
    elif node.type == 'global_stmt':
        context = evaluator.create_context(context, tree_name)
        finder = NameFinder(evaluator, context, context, str(tree_name))
        filters = finder.get_filters(search_global=True)
        # For global_stmt lookups, we only need the first possible scope,
        # which means the function itself.
        filters = [next(filters)]
        types += finder.find(filters, attribute_lookup=False)
    elif isinstance(node, tree.TryStmt):
        # TODO an exception can also be a tuple. Check for those.
        # TODO check for types that are not classes and add it to
        # the static analysis report.
        exceptions = context.eval_node(tree_name.get_previous_sibling().get_previous_sibling())
        types = unite(
            evaluator.execute(t, param.ValuesArguments([]))
            for t in exceptions
        )
    else:
        raise ValueError("Should not happen.")
    return types
Example #36
    def py__iter__(self):
        try:
            _, first_nodes = next(self.var_args.unpack())
        except StopIteration:
            types = set()
        else:
            types = unite(
                self._evaluator.eval_element(node) for node in first_nodes)
            for types in py__iter__(self._evaluator, types, first_nodes[0]):
                yield types

        module = self.var_args.get_parent_until()
        is_list = str(self.instance.name) == 'list'
        additions = _check_array_additions(self._evaluator, self.instance,
                                           module, is_list)
        if additions:
            yield additions
Example #37
def _evaluate_for_annotation(context, annotation, index=None):
    """
    Evaluates a string node, looking for an annotation.
    If index is not None, the annotation is expected to be a tuple
    and we're interested in that index.
    """
    if annotation is not None:
        definitions = context.eval_node(
            _fix_forward_reference(context, annotation))
        if index is not None:
            definitions = list(itertools.chain.from_iterable(
                definition.py__getitem__(index) for definition in definitions
                if definition.array_type == 'tuple' and
                len(list(definition.py__iter__())) >= index))
        return unite(d.execute_evaluated() for d in definitions)
    else:
        return set()
Example #38
def collections_namedtuple(evaluator, obj, arguments):
    """
    Implementation of the namedtuple function.

    This has to be done by processing the namedtuple class template and
    evaluating the result.

    .. note:: |jedi| only supports namedtuples on Python >2.6.

    """
    # Namedtuples are not supported on Python 2.6
    if not hasattr(collections, '_class_template'):
        return set()

    # Process arguments
    # TODO here we only use one of the types, we should use all.
    name = list(_follow_param(evaluator, arguments, 0))[0].obj
    _fields = list(_follow_param(evaluator, arguments, 1))[0]
    if isinstance(_fields, compiled.CompiledObject):
        fields = _fields.obj.replace(',', ' ').split()
    elif isinstance(_fields, iterable.Array):
        try:
            fields = [v.obj for v in unite(_fields.py__iter__())]
        except AttributeError:
            return set()
    else:
        return set()

    # Build source
    source = collections._class_template.format(
        typename=name,
        field_names=fields,
        num_fields=len(fields),
        arg_list=', '.join(fields),
        repr_fmt=', '.join(
            collections._repr_template.format(name=name) for name in fields),
        field_defs='\n'.join(
            collections._field_template.format(index=index, name=name)
            for index, name in enumerate(fields)))

    # Parse source
    generated_class = ParserWithRecovery(evaluator.grammar,
                                         unicode(source)).module.subscopes[0]
    return set([er.Class(evaluator, generated_class)])
Example #39
def collections_namedtuple(evaluator, obj, arguments):
    """
    Implementation of the namedtuple function.

    This has to be done by processing the namedtuple class template and
    evaluating the result.

    .. note:: |jedi| only supports namedtuples on Python >2.6.

    """
    # Namedtuples are not supported on Python 2.6
    if not hasattr(collections, '_class_template'):
        return set()

    # Process arguments
    # TODO here we only use one of the types, we should use all.
    name = list(_follow_param(evaluator, arguments, 0))[0].obj
    _fields = list(_follow_param(evaluator, arguments, 1))[0]
    if isinstance(_fields, compiled.CompiledObject):
        fields = _fields.obj.replace(',', ' ').split()
    elif isinstance(_fields, iterable.Array):
        try:
            fields = [v.obj for v in unite(_fields.py__iter__())]
        except AttributeError:
            return set()
    else:
        return set()

    # Build source
    source = collections._class_template.format(
        typename=name,
        field_names=fields,
        num_fields=len(fields),
        arg_list=', '.join(fields),
        repr_fmt=', '.join(collections._repr_template.format(name=name) for name in fields),
        field_defs='\n'.join(collections._field_template.format(index=index, name=name)
                             for index, name in enumerate(fields))
    )

    # Parse source
    generated_class = ParserWithRecovery(evaluator.grammar, unicode(source)).module.subscopes[0]
    return set([er.Class(evaluator, generated_class)])
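For reference, what the two handlers above are emulating: on the CPython versions that expose collections._class_template, namedtuple fills in that source template and executes it, and the observable result is an ordinary class:

from collections import namedtuple

Point = namedtuple('Point', 'x, y')  # a comma- or whitespace-separated field string is accepted
p = Point(1, 2)
print(p.x, p.y)       # 1 2
print(Point._fields)  # ('x', 'y')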
Example #40
def py__iter__(evaluator, types, node=None):
    debug.dbg('py__iter__')
    type_iters = []
    for typ in types:
        try:
            iter_method = typ.py__iter__
        except AttributeError:
            if node is not None:
                analysis.add(evaluator,
                             'type-error-not-iterable',
                             node,
                             message="TypeError: '%s' object is not iterable" %
                             typ)
        else:
            type_iters.append(iter_method())
            #for result in iter_method():
            #yield result

    for t in zip_longest(*type_iters, fillvalue=set()):
        yield unite(t)
Example #41
def get_types_for_typing_module(evaluator, typ, node):
    from jedi.evaluate.iterable import FakeSequence
    if not typ.base.get_parent_until().name.value == "typing":
        return None
    # we assume that any class using [] in a module called
    # "typing" with a name for which we have a replacement
    # should be replaced by that class. This is not 100%
    # airtight but I don't have a better idea to check that it's
    # actually the PEP-0484 typing module and not some other module.
    if tree.is_node(node, "subscriptlist"):
        nodes = node.children[::2]  # skip the commas
    else:
        nodes = [node]
    del node

    nodes = [_fix_forward_reference(evaluator, node) for node in nodes]

    # hacked in Union and Optional, since it's hard to do nicely in parsed code
    if typ.name.value == "Union":
        return unite(evaluator.eval_element(node) for node in nodes)
    if typ.name.value == "Optional":
        return evaluator.eval_element(nodes[0])

    typing = _get_typing_replacement_module()
    factories = evaluator.find_types(typing, "factory")
    assert len(factories) == 1
    factory = list(factories)[0]
    assert factory
    function_body_nodes = factory.children[4].children
    valid_classnames = set(child.name.value
                           for child in function_body_nodes
                           if isinstance(child, tree.Class))
    if typ.name.value not in valid_classnames:
        return None
    compiled_classname = compiled.create(evaluator, typ.name.value)

    args = FakeSequence(evaluator, nodes, "tuple")

    result = evaluator.execute_evaluated(factory, compiled_classname, args)
    return result
Example #42
def _apply_decorators(evaluator, context, node):
    """
    Returns the function that should be executed in the end.
    This is also the place where the decorators are processed.
    """
    if node.type == 'classdef':
        decoratee_context = er.ClassContext(
            evaluator,
            parent_context=context,
            classdef=node
        )
    else:
        decoratee_context = er.FunctionContext(
            evaluator,
            parent_context=context,
            funcdef=node
        )
    initial = values = set([decoratee_context])
    for dec in reversed(node.get_decorators()):
        debug.dbg('decorator: %s %s', dec, values)
        dec_values = context.eval_node(dec.children[1])
        trailer_nodes = dec.children[2:-1]
        if trailer_nodes:
            # Create a trailer and evaluate it.
            trailer = tree.PythonNode('trailer', trailer_nodes)
            trailer.parent = dec
            dec_values = evaluator.eval_trailer(context, dec_values, trailer)

        if not len(dec_values):
            debug.warning('decorator not found: %s on %s', dec, node)
            return initial

        values = unite(dec_value.execute(param.ValuesArguments([values]))
                       for dec_value in dec_values)
        if not len(values):
            debug.warning('not possible to resolve wrappers found %s', node)
            return initial

        debug.dbg('decorator end %s', values)
    return values
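The reversed(node.get_decorators()) walk suggests the decorators are stored top to bottom, and reversing them reproduces Python's own application order: the decorator written closest to the def is applied first, the outermost one last. A plain-Python illustration of that ordering:

def outer(f):
    return lambda: 'outer(' + f() + ')'

def inner(f):
    return lambda: 'inner(' + f() + ')'

@outer
@inner
def func():
    return 'func'

print(func())  # outer(inner(func)): inner is applied first, then outer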
Example #43
def _apply_decorators(evaluator, context, node):
    """
    Returns the function that should be executed in the end.
    This is also the place where the decorators are processed.
    """
    if node.type == 'classdef':
        decoratee_context = er.ClassContext(evaluator,
                                            parent_context=context,
                                            classdef=node)
    else:
        decoratee_context = er.FunctionContext(evaluator,
                                               parent_context=context,
                                               funcdef=node)
    initial = values = set([decoratee_context])
    for dec in reversed(node.get_decorators()):
        debug.dbg('decorator: %s %s', dec, values)
        dec_values = context.eval_node(dec.children[1])
        trailer_nodes = dec.children[2:-1]
        if trailer_nodes:
            # Create a trailer and evaluate it.
            trailer = tree.PythonNode('trailer', trailer_nodes)
            trailer.parent = dec
            dec_values = evaluator.eval_trailer(context, dec_values, trailer)

        if not len(dec_values):
            debug.warning('decorator not found: %s on %s', dec, node)
            return initial

        values = unite(
            dec_value.execute(param.ValuesArguments([values]))
            for dec_value in dec_values)
        if not len(values):
            debug.warning('not possible to resolve wrappers found %s', node)
            return initial

        debug.dbg('decorator end %s', values)
    return values
Example #44
 def infer(self):
     return unite(p.infer() for p in self._executed_params)
Example #45
 def _values(self):
     """Returns a list of a list of node."""
     if self.array_type == 'dict':
         return unite(v for k, v in self._items())
     else:
         return self._items()
Example #46
 def execute_function_slots(self, names, *evaluated_args):
     return unite(name.execute_evaluated(*evaluated_args) for name in names)
Example #47
 def execute(arguments):
     return unite(name.execute(arguments) for name in names)
Example #48
 def infer(self):
     return unite(p.infer() for p in self._executed_params)
Example #49
 def py__getitem__(self, index):
     return unite(lazy_context.infer() for lazy_context in self.py__iter__())
Example #50
 def infer(self):
     return unite(l.infer() for l in self.data)
Example #51
 def execute_evaluated(self, *args, **kwargs):
     return unite(context.execute_evaluated(*args, **kwargs) for context in self.infer())
Example #52
 def execute(self, arguments):
     return unite(context.execute(arguments) for context in self.infer())
Example #53
 def execute_function_slots(self, names, *evaluated_args):
     return unite(
         name.execute_evaluated(*evaluated_args)
         for name in names
     )
Example #54
 def py__getitem__(self, index):
     return unite(lazy_context.infer() for lazy_context in self.py__iter__())
Example #55
 def dict_values(self):
     return unite(lazy_context.infer() for lazy_context in self._dct.values())
Example #56
 def infer(self):
     return unite(l.infer() for l in self.data)
Example #57
 def execute(arguments):
     return unite(name.execute(arguments) for name in names)