Example #1
    def get_calling_nodes(self):
        from jedi.evaluate.dynamic import DynamicExecutedParams
        old_arguments_list = []
        arguments = self

        while arguments not in old_arguments_list:
            if not isinstance(arguments, TreeArguments):
                break

            old_arguments_list.append(arguments)
            for calling_name in reversed(list(arguments.iter_calling_names_with_star())):
                names = calling_name.goto()
                if len(names) != 1:
                    break
                if not isinstance(names[0], ParamName):
                    break
                param = names[0].get_param()
                if isinstance(param, DynamicExecutedParams):
                    # For dynamic searches we don't even want to see errors.
                    return []
                if not isinstance(param, ExecutedParam):
                    break
                if param.var_args is None:
                    break
                arguments = param.var_args
                break

        if arguments.argument_node is not None:
            return [ContextualizedNode(arguments.context, arguments.argument_node)]
        if arguments.trailer is not None:
            return [ContextualizedNode(arguments.context, arguments.trailer)]
        return []
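
For orientation: the loop above walks a `*args` forwarding chain back to the original call site. The sketch below is a made-up illustration of that pattern (the names `inner` and `outer` are not from the Jedi sources):

def inner(a, b):
    return a + b

def outer(*args):
    # get_calling_nodes() would follow the starred argument back through the
    # executed param's var_args until it reaches the call `outer(1, 2)` below.
    return inner(*args)

outer(1, 2)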
Example #2
def tree_name_to_contexts(evaluator, context, tree_name):
    types = []
    node = tree_name.get_definition(import_name_always=True)
    if node is None:
        node = tree_name.parent
        if node.type == 'global_stmt':
            context = evaluator.create_context(context, tree_name)
            finder = NameFinder(evaluator, context, context, tree_name.value)
            filters = finder.get_filters(search_global=True)
            # For global_stmt lookups, we only need the first possible scope,
            # which means the function itself.
            filters = [next(filters)]
            return finder.find(filters, attribute_lookup=False)
        elif node.type not in ('import_from', 'import_name'):
            raise ValueError("Should not happen.")

    typ = node.type
    if typ == 'for_stmt':
        types = pep0484.find_type_from_comment_hint_for(
            context, node, tree_name)
        if types:
            return types
    if typ == 'with_stmt':
        types = pep0484.find_type_from_comment_hint_with(
            context, node, tree_name)
        if types:
            return types

    if typ in ('for_stmt', 'comp_for'):
        try:
            types = context.predefined_names[node][tree_name.value]
        except KeyError:
            cn = ContextualizedNode(context, node.children[3])
            for_types = iterate_contexts(cn.infer(), cn)
            c_node = ContextualizedName(context, tree_name)
            types = check_tuple_assignments(evaluator, c_node, for_types)
    elif typ == 'expr_stmt':
        types = _remove_statements(evaluator, context, node, tree_name)
    elif typ == 'with_stmt':
        context_managers = context.eval_node(
            node.get_test_node_from_name(tree_name))
        enter_methods = context_managers.py__getattribute__('__enter__')
        return enter_methods.execute_evaluated()
    elif typ in ('import_from', 'import_name'):
        types = imports.infer_import(context, tree_name)
    elif typ in ('funcdef', 'classdef'):
        types = _apply_decorators(context, node)
    elif typ == 'try_stmt':
        # TODO an exception can also be a tuple. Check for those.
        # TODO check for types that are not classes and add it to
        # the static analysis report.
        exceptions = context.eval_node(
            tree_name.get_previous_sibling().get_previous_sibling())
        types = exceptions.execute_evaluated()
    else:
        raise ValueError("Should not happen.")
    return types
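
As a rough map of the branches above, each statement kind below defines a name through a different `node.type`. This is an illustrative snippet, not code from the Jedi sources:

from os import path              # 'import_from'  -> imports.infer_import

x = 1                            # 'expr_stmt'    -> _remove_statements
for item in [1, 2, 3]:           # 'for_stmt'     -> iterate the right-hand side
    pass
with open(__file__) as f:        # 'with_stmt'    -> __enter__ of the context manager
    pass
try:
    pass
except ValueError as err:        # 'try_stmt'     -> the exception classes, executed
    pass

def func():                      # 'funcdef' (and 'classdef') -> _apply_decorators
    pass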
Example #3
def tree_name_to_contexts(evaluator, context, tree_name):
    types = []
    node = tree_name.get_definition(import_name_always=True)
    if node is None:
        node = tree_name.parent
        if node.type == 'global_stmt':
            context = evaluator.create_context(context, tree_name)
            finder = NameFinder(evaluator, context, context, tree_name.value)
            filters = finder.get_filters(search_global=True)
            # For global_stmt lookups, we only need the first possible scope,
            # which means the function itself.
            filters = [next(filters)]
            return finder.find(filters, attribute_lookup=False)
        elif node.type not in ('import_from', 'import_name'):
            raise ValueError("Should not happen.")

    typ = node.type
    if typ == 'for_stmt':
        types = pep0484.find_type_from_comment_hint_for(context, node, tree_name)
        if types:
            return types
    if typ == 'with_stmt':
        types = pep0484.find_type_from_comment_hint_with(context, node, tree_name)
        if types:
            return types

    if typ in ('for_stmt', 'comp_for'):
        try:
            types = context.predefined_names[node][tree_name.value]
        except KeyError:
            cn = ContextualizedNode(context, node.children[3])
            for_types = iterate_contexts(cn.infer(), cn)
            c_node = ContextualizedName(context, tree_name)
            types = check_tuple_assignments(evaluator, c_node, for_types)
    elif typ == 'expr_stmt':
        types = _remove_statements(evaluator, context, node, tree_name)
    elif typ == 'with_stmt':
        context_managers = context.eval_node(node.get_test_node_from_name(tree_name))
        enter_methods = context_managers.py__getattribute__('__enter__')
        return enter_methods.execute_evaluated()
    elif typ in ('import_from', 'import_name'):
        types = imports.infer_import(context, tree_name)
    elif typ in ('funcdef', 'classdef'):
        types = _apply_decorators(context, node)
    elif typ == 'try_stmt':
        # TODO an exception can also be a tuple. Check for those.
        # TODO check for types that are not classes and add it to
        # the static analysis report.
        exceptions = context.eval_node(tree_name.get_previous_sibling().get_previous_sibling())
        types = exceptions.execute_evaluated()
    else:
        raise ValueError("Should not happen.")
    return types
Example #4
    def get_yield_lazy_contexts(self, is_async=False):
        # TODO: if is_async, wrap yield statements in Awaitable/async_generator_asend
        for_parents = [(y,
                        tree.search_ancestor(y, 'for_stmt', 'funcdef',
                                             'while_stmt', 'if_stmt'))
                       for y in get_yield_exprs(self.evaluator, self.tree_node)
                       ]

        # Calculate if the yields are placed within the same for loop.
        yields_order = []
        last_for_stmt = None
        for yield_, for_stmt in for_parents:
            # For really simple for loops we can predict the order. Otherwise
            # we just ignore it.
            parent = for_stmt.parent
            if parent.type == 'suite':
                parent = parent.parent
            if for_stmt.type == 'for_stmt' and parent == self.tree_node \
                    and parser_utils.for_stmt_defines_one_name(for_stmt):  # Simplicity for now.
                if for_stmt == last_for_stmt:
                    yields_order[-1][1].append(yield_)
                else:
                    yields_order.append((for_stmt, [yield_]))
            elif for_stmt == self.tree_node:
                yields_order.append((None, [yield_]))
            else:
                types = self.get_return_values(check_yields=True)
                if types:
                    yield LazyKnownContexts(types)
                return
            last_for_stmt = for_stmt

        for for_stmt, yields in yields_order:
            if for_stmt is None:
                # No for_stmt, just normal yields.
                for yield_ in yields:
                    for result in self._get_yield_lazy_context(yield_):
                        yield result
            else:
                input_node = for_stmt.get_testlist()
                cn = ContextualizedNode(self, input_node)
                ordered = cn.infer().iterate(cn)
                ordered = list(ordered)
                for lazy_context in ordered:
                    dct = {
                        str(for_stmt.children[1].value): lazy_context.infer()
                    }
                    with helpers.predefine_names(self, for_stmt, dct):
                        for yield_in_same_for_stmt in yields:
                            for result in self._get_yield_lazy_context(
                                    yield_in_same_for_stmt):
                                yield result
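
The "really simple for loop" case that the ordering logic above covers looks roughly like the generator below; `gen` is a hypothetical name used only for illustration:

def gen():
    # A for_stmt directly inside the funcdef that binds exactly one name:
    # both yields are grouped under the same for_stmt entry.
    for x in [1, 2, 3]:
        yield x
        yield x * 10
    # A yield whose nearest ancestor is the funcdef itself lands in the
    # (None, [yield_]) bucket and is handled as a plain yield.
    yield 'done'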
Example #5
    def _eval_yield(self, yield_expr):
        if yield_expr.type == 'keyword':
            # `yield` just yields None.
            yield LazyKnownContext(compiled.create(self.evaluator, None))
            return

        node = yield_expr.children[1]
        if node.type == 'yield_arg':  # It must be a yield from.
            cn = ContextualizedNode(self, node.children[1])
            for lazy_context in cn.infer().iterate(cn):
                yield lazy_context
        else:
            yield LazyTreeContext(self, node)
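
For reference, the three shapes of yield expression distinguished above, shown as a minimal illustration (not Jedi code):

def gen():
    yield               # bare `yield` keyword          -> yields None
    yield 42            # plain expression              -> LazyTreeContext
    yield from [1, 2]   # 'yield_arg' (`yield from ..`) -> iterate the inferred value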
Example #6
    def _eval_yield(self, yield_expr):
        if yield_expr.type == 'keyword':
            # `yield` just yields None.
            yield LazyKnownContext(compiled.create(self.evaluator, None))
            return

        node = yield_expr.children[1]
        if node.type == 'yield_arg':  # It must be a yield from.
            cn = ContextualizedNode(self, node.children[1])
            for lazy_context in cn.infer().iterate(cn):
                yield lazy_context
        else:
            yield LazyTreeContext(self, node)
Example #7
def _eval_expr_stmt(context, stmt, seek_name=None):
    """
    The starting point of the completion. A statement always owns a call
    list, which contains the calls that the statement makes. In case multiple
    names are defined in the statement, `seek_name` selects the result for
    that name.

    :param stmt: A `tree.ExprStmt`.
    """
    debug.dbg('eval_expr_stmt %s (%s)', stmt, seek_name)
    rhs = stmt.get_rhs()
    context_set = context.eval_node(rhs)

    if seek_name:
        c_node = ContextualizedName(context, seek_name)
        context_set = check_tuple_assignments(context.evaluator, c_node,
                                              context_set)

    first_operator = next(stmt.yield_operators(), None)
    if first_operator not in ('=', None) and first_operator.type == 'operator':
        # `=` is always the last character in aug assignments -> -1
        operator = copy.copy(first_operator)
        operator.value = operator.value[:-1]
        name = stmt.get_defined_names()[0].value
        left = context.py__getattribute__(name,
                                          position=stmt.start_pos,
                                          search_global=True)

        for_stmt = tree.search_ancestor(stmt, 'for_stmt')
        if for_stmt is not None and for_stmt.type == 'for_stmt' and context_set \
                and parser_utils.for_stmt_defines_one_name(for_stmt):
            # Iterate through the result and add the values; that's possible
            # only in for loops without clutter, because they are
            # predictable. Also only do it if the variable is not a tuple.
            node = for_stmt.get_testlist()
            cn = ContextualizedNode(context, node)
            ordered = list(cn.infer().iterate(cn))

            for lazy_context in ordered:
                dct = {for_stmt.children[1].value: lazy_context.infer()}
                with helpers.predefine_names(context, for_stmt, dct):
                    t = context.eval_node(rhs)
                    left = _eval_comparison(context.evaluator, context, left,
                                            operator, t)
            context_set = left
        else:
            context_set = _eval_comparison(context.evaluator, context, left,
                                           operator, context_set)
    debug.dbg('eval_expr_stmt result %s', context_set)
    return context_set
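
The augmented-assignment-in-a-loop branch above corresponds to source like the following; the variable names are invented for the illustration:

total = 0
for i in [1, 2, 3]:
    # `first_operator` is '+=' here; the trailing '=' is stripped off and the
    # right-hand side is re-evaluated once per predefined loop value.
    total += i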
Example #8
    def _get_yield_lazy_context(self, yield_expr):
        if yield_expr.type == 'keyword':
            # `yield` just yields None.
            ctx = compiled.builtin_from_name(self.evaluator, u'None')
            yield LazyKnownContext(ctx)
            return

        node = yield_expr.children[1]
        if node.type == 'yield_arg':  # It must be a yield from.
            cn = ContextualizedNode(self, node.children[1])
            for lazy_context in cn.infer().iterate(cn):
                yield lazy_context
        else:
            yield LazyTreeContext(self, node)
Example #9
    def _get_yield_lazy_context(self, yield_expr):
        if yield_expr.type == 'keyword':
            # `yield` just yields None.
            ctx = compiled.builtin_from_name(self.evaluator, u'None')
            yield LazyKnownContext(ctx)
            return

        node = yield_expr.children[1]
        if node.type == 'yield_arg':  # It must be a yield from.
            cn = ContextualizedNode(self, node.children[1])
            for lazy_context in cn.infer().iterate(cn):
                yield lazy_context
        else:
            yield LazyTreeContext(self, node)
Example #10
def eval_trailer(context, base_contexts, trailer):
    trailer_op, node = trailer.children[:2]
    if node == ')':  # `arglist` is optional.
        node = None

    if trailer_op == '[':
        trailer_op, node, _ = trailer.children

        # TODO It's kind of stupid to cast this from a context set to a set.
        foo = set(base_contexts)
        # special case: PEP0484 typing module, see
        # https://github.com/davidhalter/jedi/issues/663
        result = ContextSet()
        for typ in list(foo):
            if isinstance(typ, (ClassContext, TreeInstance)):
                typing_module_types = pep0484.py__getitem__(context, typ, node)
                if typing_module_types is not None:
                    foo.remove(typ)
                    result |= typing_module_types

        return result | base_contexts.get_item(
            eval_subscript_list(context.evaluator, context, node),
            ContextualizedNode(context, trailer))
    else:
        debug.dbg('eval_trailer: %s in %s', trailer, base_contexts)
        if trailer_op == '.':
            return base_contexts.py__getattribute__(name_context=context,
                                                    name_or_str=node)
        else:
            assert trailer_op == '(', 'trailer_op is actually %s' % trailer_op
            args = arguments.TreeArguments(context.evaluator, context, node,
                                           trailer)
            return base_contexts.execute(args)
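
The three trailer operators handled above map onto ordinary attribute access, subscription and calls. The snippet below is illustrative only, with `typing.List` standing in for the PEP 484 special case:

from typing import List

obj = [10, 20]
obj.append(30)   # '.' trailer -> py__getattribute__, then '(' trailer -> execute
obj[0]           # '[' trailer -> get_item via eval_subscript_list
List[int]        # '[' trailer on a typing class -> pep0484.py__getitem__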
Example #11
    def _nested(self, comp_fors, parent_context=None):
        comp_for = comp_fors[0]

        is_async = 'async' == comp_for.children[comp_for.children.index('for')
                                                - 1]

        input_node = comp_for.children[comp_for.children.index('in') + 1]
        parent_context = parent_context or self._defining_context
        input_types = parent_context.eval_node(input_node)
        # TODO: simulate await if self.is_async

        cn = ContextualizedNode(parent_context, input_node)
        iterated = input_types.iterate(cn, is_async=is_async)
        exprlist = comp_for.children[comp_for.children.index('for') + 1]
        for i, lazy_context in enumerate(iterated):
            types = lazy_context.infer()
            dct = unpack_tuple_to_dict(parent_context, types, exprlist)
            context_ = self._get_comp_for_context(
                parent_context,
                comp_for,
            )
            with predefine_names(context_, comp_for, dct):
                try:
                    for result in self._nested(comp_fors[1:], context_):
                        yield result
                except IndexError:
                    iterated = context_.eval_node(self._eval_node())
                    if self.array_type == 'dict':
                        yield iterated, context_.eval_node(self._eval_node(2))
                    else:
                        yield iterated
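
Each comp_for in the chain above corresponds to one `for` clause of a comprehension. The lines below are an illustration, not Jedi code:

# Two chained comp_for clauses; _nested recurses via comp_fors[1:]:
pairs = [(x, y) for x in range(3) for y in range(2)]

# A dict comprehension hits the array_type == 'dict' branch, which yields the
# key contexts together with the evaluated value node:
squares = {n: n * n for n in range(5)}

# An async comprehension (inside a coroutine) would set is_async for iterate():
# result = [x async for x in aiterable]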
Example #12
    def goto_stub_definitions(self, context, name):
        def_ = name.get_definition(import_name_always=True)
        if def_ is not None:
            type_ = def_.type
            is_classdef = type_ == 'classdef'
            if is_classdef or type_ == 'funcdef':
                if is_classdef:
                    c = ClassContext(self, context, name.parent)
                else:
                    c = FunctionContext.from_context(context, name.parent)
                return ContextSet([c])

            if type_ == 'expr_stmt':
                is_simple_name = name.parent.type not in ('power', 'trailer')
                if is_simple_name:
                    return eval_expr_stmt(context, def_, name)
            if type_ == 'for_stmt':
                container_types = context.eval_node(def_.children[3])
                cn = ContextualizedNode(context, def_.children[3])
                for_types = iterate_contexts(container_types, cn)
                c_node = ContextualizedName(context, name)
                return check_tuple_assignments(self, c_node, for_types)
            if type_ in ('import_from', 'import_name'):
                return imports.infer_import(context, name)
        else:
            result = self._follow_error_node_imports_if_possible(context, name)
            if result is not None:
                return result

        return helpers.evaluate_call_of_leaf(context, name)
Example #13
    def get_yield_lazy_contexts(self, is_async=False):
        # TODO: if is_async, wrap yield statements in Awaitable/async_generator_asend
        for_parents = [(y, tree.search_ancestor(y, 'for_stmt', 'funcdef',
                                                'while_stmt', 'if_stmt'))
                       for y in get_yield_exprs(self.evaluator, self.tree_node)]

        # Calculate if the yields are placed within the same for loop.
        yields_order = []
        last_for_stmt = None
        for yield_, for_stmt in for_parents:
            # For really simple for loops we can predict the order. Otherwise
            # we just ignore it.
            parent = for_stmt.parent
            if parent.type == 'suite':
                parent = parent.parent
            if for_stmt.type == 'for_stmt' and parent == self.tree_node \
                    and parser_utils.for_stmt_defines_one_name(for_stmt):  # Simplicity for now.
                if for_stmt == last_for_stmt:
                    yields_order[-1][1].append(yield_)
                else:
                    yields_order.append((for_stmt, [yield_]))
            elif for_stmt == self.tree_node:
                yields_order.append((None, [yield_]))
            else:
                types = self.get_return_values(check_yields=True)
                if types:
                    yield LazyKnownContexts(types)
                return
            last_for_stmt = for_stmt

        for for_stmt, yields in yields_order:
            if for_stmt is None:
                # No for_stmt, just normal yields.
                for yield_ in yields:
                    for result in self._get_yield_lazy_context(yield_):
                        yield result
            else:
                input_node = for_stmt.get_testlist()
                cn = ContextualizedNode(self, input_node)
                ordered = cn.infer().iterate(cn)
                ordered = list(ordered)
                for lazy_context in ordered:
                    dct = {str(for_stmt.children[1].value): lazy_context.infer()}
                    with helpers.predefine_names(self, for_stmt, dct):
                        for yield_in_same_for_stmt in yields:
                            for result in self._get_yield_lazy_context(yield_in_same_for_stmt):
                                yield result
Example #14
def _eval_expr_stmt(context, stmt, seek_name=None):
    """
    The starting point of the completion. A statement always owns a call
    list, which contains the calls that the statement makes. In case multiple
    names are defined in the statement, `seek_name` selects the result for
    that name.

    :param stmt: A `tree.ExprStmt`.
    """
    debug.dbg('eval_expr_stmt %s (%s)', stmt, seek_name)
    rhs = stmt.get_rhs()
    context_set = context.eval_node(rhs)

    if seek_name:
        c_node = ContextualizedName(context, seek_name)
        context_set = check_tuple_assignments(context.evaluator, c_node, context_set)

    first_operator = next(stmt.yield_operators(), None)
    if first_operator not in ('=', None) and first_operator.type == 'operator':
        # `=` is always the last character in aug assignments -> -1
        operator = copy.copy(first_operator)
        operator.value = operator.value[:-1]
        name = stmt.get_defined_names()[0].value
        left = context.py__getattribute__(
            name, position=stmt.start_pos, search_global=True)

        for_stmt = tree.search_ancestor(stmt, 'for_stmt')
        if for_stmt is not None and for_stmt.type == 'for_stmt' and context_set \
                and parser_utils.for_stmt_defines_one_name(for_stmt):
            # Iterate through the result and add the values; that's possible
            # only in for loops without clutter, because they are
            # predictable. Also only do it if the variable is not a tuple.
            node = for_stmt.get_testlist()
            cn = ContextualizedNode(context, node)
            ordered = list(cn.infer().iterate(cn))

            for lazy_context in ordered:
                dct = {for_stmt.children[1].value: lazy_context.infer()}
                with helpers.predefine_names(context, for_stmt, dct):
                    t = context.eval_node(rhs)
                    left = _eval_comparison(context.evaluator, context, left, operator, t)
            context_set = left
        else:
            context_set = _eval_comparison(context.evaluator, context, left, operator, context_set)
    debug.dbg('eval_expr_stmt result %s', context_set)
    return context_set
Example #15
def _paths_from_assignment(module_context, expr_stmt):
    """
    Extracts the assigned strings from an assignment that looks as follows::

        sys.path[0:0] = ['module/path', 'another/module/path']

    This function is in general pretty tolerant (and therefore 'buggy').
    However, adding more paths to Jedi's sys_path is usually not a big issue,
    because it only affects Jedi in rare situations, and adding more paths
    than necessary tends to benefit the general user.
    """
    for assignee, operator in zip(expr_stmt.children[::2],
                                  expr_stmt.children[1::2]):
        try:
            assert operator in ['=', '+=']
            assert assignee.type in ('power', 'atom_expr') and \
                len(assignee.children) > 1
            c = assignee.children
            assert c[0].type == 'name' and c[0].value == 'sys'
            trailer = c[1]
            assert trailer.children[0] == '.' and trailer.children[
                1].value == 'path'
            # TODO Essentially we're not checking details on sys.path
            # manipulation. Both assignment of the sys.path and changing/adding
            # parts of the sys.path are the same: They get added to the end of
            # the current sys.path.
            """
            execution = c[2]
            assert execution.children[0] == '['
            subscript = execution.children[1]
            assert subscript.type == 'subscript'
            assert ':' in subscript.children
            """
        except AssertionError:
            continue

        cn = ContextualizedNode(module_context.create_context(expr_stmt),
                                expr_stmt)
        for lazy_context in cn.infer().iterate(cn):
            for context in lazy_context.infer():
                if is_string(context):
                    abs_path = _abs_path(module_context,
                                         context.get_safe_value())
                    if abs_path is not None:
                        yield abs_path
Example #16
def _paths_from_assignment(module_context, expr_stmt):
    """
    Extracts the assigned strings from an assignment that looks as follows::

        sys.path[0:0] = ['module/path', 'another/module/path']

    This function is in general pretty tolerant (and therefore 'buggy').
    However, adding more paths to Jedi's sys_path is usually not a big issue,
    because it only affects Jedi in rare situations, and adding more paths
    than necessary tends to benefit the general user.
    """
    for assignee, operator in zip(expr_stmt.children[::2], expr_stmt.children[1::2]):
        try:
            assert operator in ['=', '+=']
            assert assignee.type in ('power', 'atom_expr') and \
                len(assignee.children) > 1
            c = assignee.children
            assert c[0].type == 'name' and c[0].value == 'sys'
            trailer = c[1]
            assert trailer.children[0] == '.' and trailer.children[1].value == 'path'
            # TODO Essentially we're not checking details on sys.path
            # manipulation. Both assignment of the sys.path and changing/adding
            # parts of the sys.path are the same: They get added to the end of
            # the current sys.path.
            """
            execution = c[2]
            assert execution.children[0] == '['
            subscript = execution.children[1]
            assert subscript.type == 'subscript'
            assert ':' in subscript.children
            """
        except AssertionError:
            continue

        cn = ContextualizedNode(module_context.create_context(expr_stmt), expr_stmt)
        for lazy_context in cn.infer().iterate(cn):
            for context in lazy_context.infer():
                if is_string(context):
                    abs_path = _abs_path(module_context, context.get_safe_value())
                    if abs_path is not None:
                        yield abs_path
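
Concretely, the assertions above accept assignments of the following shapes; the first line repeats the docstring's example, the second shows the '+=' operator that is also allowed (the extra path string is purely illustrative):

import sys

sys.path[0:0] = ['module/path', 'another/module/path']   # '=' on a sys.path trailer
sys.path += ['extra/module/path']                         # '+=' passes the operator check too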
Example #17
def builtins_reversed(sequences, obj, arguments):
    # While we could do without this variable (just by using sequences), we
    # want static analysis to work well. Therefore we need to generate the
    # values again.
    key, lazy_context = next(arguments.unpack())
    cn = None
    if isinstance(lazy_context, LazyTreeContext):
        # TODO access private
        cn = ContextualizedNode(lazy_context.context, lazy_context.data)
    ordered = list(sequences.iterate(cn))

    # Repack iterator values and then run it the normal way. This is
    # necessary, because `reversed` is a function and autocompletion
    # would fail in certain cases like `reversed(x).__iter__` if we
    # just returned the result directly.
    seq, = obj.evaluator.typing_module.py__getattribute__('Iterator').execute_evaluated()
    return ContextSet([ReversedObject(seq, list(reversed(ordered)))])
Example #18
def builtins_reversed(evaluator, sequences, obj, arguments):
    # While we could do without this variable (just by using sequences), we
    # want static analysis to work well. Therefore we need to generate the
    # values again.
    key, lazy_context = next(arguments.unpack())
    cn = None
    if isinstance(lazy_context, LazyTreeContext):
        # TODO access private
        cn = ContextualizedNode(lazy_context._context, lazy_context.data)
    ordered = list(sequences.iterate(cn))

    rev = list(reversed(ordered))
    # Repack iterator values and then run it the normal way. This is
    # necessary, because `reversed` is a function and autocompletion
    # would fail in certain cases like `reversed(x).__iter__` if we
    # just returned the result directly.
    seq = iterable.FakeSequence(evaluator, u'list', rev)
    arguments = ValuesArguments([ContextSet(seq)])
    return ContextSet(CompiledInstance(evaluator, evaluator.builtins_module, obj, arguments))
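
The repacking described in the comment matters for completion on the `reversed()` result itself, roughly as in this illustrative snippet:

items = ['a', 'b', 'c']
rev = reversed(items)
# Completing on the result (e.g. rev.__iter__) is the case the comment mentions:
# the reversed values are wrapped in a sequence-like instance so that such
# completions keep working instead of failing on the raw result.
rev.__iter__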
Example #19
def check_tuple_assignments(evaluator, contextualized_name, context_set):
    """
    Checks if tuples are assigned.
    """
    lazy_context = None
    for index, node in contextualized_name.assignment_indexes():
        cn = ContextualizedNode(contextualized_name.context, node)
        iterated = context_set.iterate(cn)
        for _ in range(index + 1):
            try:
                lazy_context = next(iterated)
            except StopIteration:
                # We could do this with the default param in next. But this
                # would allow this loop to run for a very long time if the
                # index number is high. Therefore break if the loop is
                # finished.
                return ContextSet()
        context_set = lazy_context.infer()
    return context_set
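
The assignment indexes being walked above come from tuple-unpacking targets such as these (an illustrative snippet, not Jedi code):

a, b = 1, 2              # index 0 selects the value inferred for `a`, index 1 for `b`
x, (y, z) = 3, (4, 5)    # nested targets: each nesting level is one (index, node) step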
Example #20
def eval_trailer(context, base_contexts, trailer):
    trailer_op, node = trailer.children[:2]
    if node == ')':  # `arglist` is optional.
        node = None

    if trailer_op == '[':
        trailer_op, node, _ = trailer.children
        return base_contexts.get_item(
            eval_subscript_list(context.evaluator, context, node),
            ContextualizedNode(context, trailer))
    else:
        debug.dbg('eval_trailer: %s in %s', trailer, base_contexts)
        if trailer_op == '.':
            return base_contexts.py__getattribute__(name_context=context,
                                                    name_or_str=node)
        else:
            assert trailer_op == '(', 'trailer_op is actually %s' % trailer_op
            args = arguments.TreeArguments(context.evaluator, context, node,
                                           trailer)
            return base_contexts.execute(args)
Example #21
    def goto_definitions(self, context, name):
        def_ = name.get_definition(import_name_always=True)
        if def_ is not None:
            type_ = def_.type
            if type_ == 'classdef':
                return [ClassContext(self, context, name.parent)]
            elif type_ == 'funcdef':
                return [FunctionContext(self, context, name.parent)]

            if type_ == 'expr_stmt':
                is_simple_name = name.parent.type not in ('power', 'trailer')
                if is_simple_name:
                    return eval_expr_stmt(context, def_, name)
            if type_ == 'for_stmt':
                container_types = context.eval_node(def_.children[3])
                cn = ContextualizedNode(context, def_.children[3])
                for_types = iterate_contexts(container_types, cn)
                c_node = ContextualizedName(context, name)
                return check_tuple_assignments(self, c_node, for_types)
            if type_ in ('import_from', 'import_name'):
                return imports.infer_import(context, name)

        return helpers.evaluate_call_of_leaf(context, name)
Example #22
def tree_name_to_contexts(evaluator, context, tree_name):
    context_set = ContextSet()
    module_node = context.get_root_context().tree_node
    if module_node is not None:
        names = module_node.get_used_names().get(tree_name.value, [])
        for name in names:
            expr_stmt = name.parent

            correct_scope = parser_utils.get_parent_scope(name) == context.tree_node

            if expr_stmt.type == "expr_stmt" and expr_stmt.children[1].type == "annassign" and correct_scope:
                context_set |= _evaluate_for_annotation(context, expr_stmt.children[1].children[1])

    if context_set:
        return context_set

    types = []
    node = tree_name.get_definition(import_name_always=True)
    if node is None:
        node = tree_name.parent
        if node.type == 'global_stmt':
            context = evaluator.create_context(context, tree_name)
            finder = NameFinder(evaluator, context, context, tree_name.value)
            filters = finder.get_filters(search_global=True)
            # For global_stmt lookups, we only need the first possible scope,
            # which means the function itself.
            filters = [next(filters)]
            return finder.find(filters, attribute_lookup=False)
        elif node.type not in ('import_from', 'import_name'):
            context = evaluator.create_context(context, tree_name)
            return eval_atom(context, tree_name)

    typ = node.type
    if typ == 'for_stmt':
        types = pep0484.find_type_from_comment_hint_for(context, node, tree_name)
        if types:
            return types
    if typ == 'with_stmt':
        types = pep0484.find_type_from_comment_hint_with(context, node, tree_name)
        if types:
            return types

    if typ in ('for_stmt', 'comp_for'):
        try:
            types = context.predefined_names[node][tree_name.value]
        except KeyError:
            cn = ContextualizedNode(context, node.children[3])
            for_types = iterate_contexts(
                cn.infer(),
                contextualized_node=cn,
                is_async=node.parent.type == 'async_stmt',
            )
            c_node = ContextualizedName(context, tree_name)
            types = check_tuple_assignments(evaluator, c_node, for_types)
    elif typ == 'expr_stmt':
        types = _remove_statements(evaluator, context, node, tree_name)
    elif typ == 'with_stmt':
        context_managers = context.eval_node(node.get_test_node_from_name(tree_name))
        enter_methods = context_managers.py__getattribute__(u'__enter__')
        return enter_methods.execute_evaluated()
    elif typ in ('import_from', 'import_name'):
        types = imports.infer_import(context, tree_name)
    elif typ in ('funcdef', 'classdef'):
        types = _apply_decorators(context, node)
    elif typ == 'try_stmt':
        # TODO an exception can also be a tuple. Check for those.
        # TODO check for types that are not classes and add it to
        # the static analysis report.
        exceptions = context.eval_node(tree_name.get_previous_sibling().get_previous_sibling())
        types = exceptions.execute_evaluated()
    elif node.type == 'param':
        types = NO_CONTEXTS
    else:
        raise ValueError("Should not happen. type: %s" % typ)
    return types
Example #23
def tree_name_to_contexts(evaluator, context, tree_name):
    context_set = NO_CONTEXTS
    module_node = context.get_root_context().tree_node
    # First check for annotations, like: `foo: int = 3`
    if module_node is not None:
        names = module_node.get_used_names().get(tree_name.value, [])
        for name in names:
            expr_stmt = name.parent

            if expr_stmt.type == "expr_stmt" and expr_stmt.children[
                    1].type == "annassign":
                correct_scope = parser_utils.get_parent_scope(
                    name) == context.tree_node
                if correct_scope:
                    context_set |= annotation.eval_annotation(
                        context,
                        expr_stmt.children[1].children[1]).execute_annotation(
                        )
        if context_set:
            return context_set

    types = []
    node = tree_name.get_definition(import_name_always=True)
    if node is None:
        node = tree_name.parent
        if node.type == 'global_stmt':
            context = evaluator.create_context(context, tree_name)
            finder = NameFinder(evaluator, context, context, tree_name.value)
            filters = finder.get_filters(search_global=True)
            # For global_stmt lookups, we only need the first possible scope,
            # which means the function itself.
            filters = [next(filters)]
            return finder.find(filters, attribute_lookup=False)
        elif node.type not in ('import_from', 'import_name'):
            context = evaluator.create_context(context, tree_name)
            return eval_atom(context, tree_name)

    typ = node.type
    if typ == 'for_stmt':
        types = annotation.find_type_from_comment_hint_for(
            context, node, tree_name)
        if types:
            return types
    if typ == 'with_stmt':
        types = annotation.find_type_from_comment_hint_with(
            context, node, tree_name)
        if types:
            return types

    if typ in ('for_stmt', 'comp_for', 'sync_comp_for'):
        try:
            types = context.predefined_names[node][tree_name.value]
        except KeyError:
            cn = ContextualizedNode(context, node.children[3])
            for_types = iterate_contexts(
                cn.infer(),
                contextualized_node=cn,
                is_async=node.parent.type == 'async_stmt',
            )
            c_node = ContextualizedName(context, tree_name)
            types = check_tuple_assignments(evaluator, c_node, for_types)
    elif typ == 'expr_stmt':
        types = _remove_statements(evaluator, context, node, tree_name)
    elif typ == 'with_stmt':
        context_managers = context.eval_node(
            node.get_test_node_from_name(tree_name))
        enter_methods = context_managers.py__getattribute__(u'__enter__')
        return enter_methods.execute_evaluated()
    elif typ in ('import_from', 'import_name'):
        types = imports.infer_import(context, tree_name)
    elif typ in ('funcdef', 'classdef'):
        types = _apply_decorators(context, node)
    elif typ == 'try_stmt':
        # TODO an exception can also be a tuple. Check for those.
        # TODO check for types that are not classes and add it to
        # the static analysis report.
        exceptions = context.eval_node(
            tree_name.get_previous_sibling().get_previous_sibling())
        types = exceptions.execute_evaluated()
    elif node.type == 'param':
        types = NO_CONTEXTS
    else:
        raise ValueError("Should not happen. type: %s" % typ)
    return types
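
The annotation check at the top of this variant matches annotated assignments like the one named in its comment; a minimal illustration:

foo: int = 3    # an expr_stmt whose children[1] is an 'annassign'; its
                # children[1] (here `int`) is what eval_annotation() receives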