Example #1
 def add_issue(self, node, code, message):
     if self._previous_leaf is not None:
         if search_ancestor(self._previous_leaf, 'error_node') is not None:
             return
         if self._previous_leaf.type == 'error_leaf':
             return
     if search_ancestor(node, 'error_node') is not None:
         return
     if code in (901, 903):
         # 901 and 903 are raised by the ErrorFinder.
         super(PEP8Normalizer, self).add_issue(node, code, message)
     else:
         # Skip ErrorFinder here, because it has custom behavior.
         super(ErrorFinder, self).add_issue(node, code, message)
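
Every snippet in this collection guards its tree walk with search_ancestor. For orientation, here is a minimal, self-contained sketch of the call pattern on a freshly parsed parso tree; it assumes parso is installed and that search_ancestor can be imported from parso.python.tree, as the imports in Examples #5 and #6 below do.

import parso
from parso.python.tree import search_ancestor

module = parso.parse("def f(x):\n    return x + 1\n")
# Leaf for the `x` inside the return statement (line 2, column 11).
leaf = module.get_leaf_for_position((2, 11))
# Walk up the parents until a node of the requested type is found.
funcdef = search_ancestor(leaf, 'funcdef')
print(funcdef.type, funcdef.name.value)  # funcdef f
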
Example #2
 def add_issue(self, node, code, message):
     if self._previous_leaf is not None:
         if search_ancestor(self._previous_leaf, 'error_node') is not None:
             return
         if self._previous_leaf.type == 'error_leaf':
             return
     if search_ancestor(node, 'error_node') is not None:
         return
     if code in (901, 903):
         # 901 and 903 are raised by the ErrorFinder.
         super(PEP8Normalizer, self).add_issue(node, code, message)
     else:
         # Skip ErrorFinder here, because it has custom behavior.
         super(ErrorFinder, self).add_issue(node, code, message)
Example #3
    def get_context(self, line=None, column=None):
        pos = (line, column)
        leaf = self._module_node.get_leaf_for_position(pos,
                                                       include_prefixes=True)
        if leaf.start_pos > pos or leaf.type == 'endmarker':
            previous_leaf = leaf.get_previous_leaf()
            if previous_leaf is not None:
                leaf = previous_leaf

        module_context = self._get_module_context()

        n = tree.search_ancestor(leaf, 'funcdef', 'classdef')
        if n is not None and n.start_pos < pos <= n.children[-1].start_pos:
            # This is a bit of a special case. The context of a function/class
            # name/param/keyword is always its parent context, not the
            # function itself. Catch all the cases here where we are before the
            # suite object, but still in the function.
            context = module_context.create_value(n).as_context()
        else:
            context = module_context.create_context(leaf)

        while context.name is None:
            context = context.parent_context  # comprehensions

        definition = classes.Definition(self._inference_state, context.name)
        while definition.type != 'module':
            name = definition._name  # TODO private access
            tree_name = name.tree_name
            if tree_name is not None:  # Happens with lambdas.
                scope = tree_name.get_definition()
                if scope.start_pos[1] < column:
                    break
            definition = definition.parent()
        return definition
Example #4
    def parent(self):
        if not self._name.is_value_name:
            return None

        if self.type in ('function', 'class',
                         'param') and self._name.tree_name is not None:
            # Since the parent_context doesn't really match what the user
            # thinks of as the parent here, we handle these cases separately.
            # The reason for this is the following:
            # - class: Nested classes parent_context is always the
            #   parent_context of the most outer one.
            # - function: Functions in classes have the module as
            #   parent_context.
            # - param: The parent_context of a param is not its function but
            #   e.g. the outer class or module.
            cls_or_func_node = self._name.tree_name.get_definition()
            parent = search_ancestor(cls_or_func_node, 'funcdef', 'classdef',
                                     'file_input')
            context = self._get_module_context().create_value(
                parent).as_context()
        else:
            context = self._name.parent_context

        if context is None:
            return None
        while context.name is None:
            # Happens for comprehension contexts
            context = context.parent_context

        return Definition(self._inference_state, context.name)
Example #5
def _iter_nodes_for_param(param_name):
    from parso.python.tree import search_ancestor
    from jedi.inference.arguments import TreeArguments

    execution_context = param_name.parent_context
    # Walk up the parso tree to get the FunctionNode we want. We use the parso
    # tree rather than going via the execution context so that we're agnostic of
    # the specific scope we're evaluating within (i.e: module or function,
    # etc.).
    function_node = tree.search_ancestor(param_name.tree_name, 'funcdef',
                                         'lambdef')
    module_node = function_node.get_root_node()
    start = function_node.children[-1].start_pos
    end = function_node.children[-1].end_pos
    for name in module_node.get_used_names().get(param_name.string_name):
        if start <= name.start_pos < end:
            # Is used in the function
            argument = name.parent
            if argument.type == 'argument' \
                    and argument.children[0] == '*' * param_name.star_count:
                trailer = search_ancestor(argument, 'trailer')
                if trailer is not None:  # Make sure we're in a function
                    context = execution_context.create_context(trailer)
                    if _goes_to_param_name(param_name, context, name):
                        values = _to_callables(context, trailer)

                        args = TreeArguments.create_cached(
                            execution_context.inference_state,
                            context=context,
                            argument_node=trailer.children[1],
                            trailer=trailer,
                        )
                        for c in values:
                            yield c, args
Example #6
def _iter_nodes_for_param(param_name):
    from parso.python.tree import search_ancestor
    from jedi.inference.arguments import TreeArguments

    execution_context = param_name.parent_context
    function_node = execution_context.tree_node
    module_node = function_node.get_root_node()
    start = function_node.children[-1].start_pos
    end = function_node.children[-1].end_pos
    for name in module_node.get_used_names().get(param_name.string_name):
        if start <= name.start_pos < end:
            # Is used in the function
            argument = name.parent
            if argument.type == 'argument' \
                    and argument.children[0] == '*' * param_name.star_count:
                trailer = search_ancestor(argument, 'trailer')
                if trailer is not None:  # Make sure we're in a function
                    context = execution_context.create_context(trailer)
                    if _goes_to_param_name(param_name, context, name):
                        values = _to_callables(context, trailer)

                        args = TreeArguments.create_cached(
                            execution_context.inference_state,
                            context=context,
                            argument_node=trailer.children[1],
                            trailer=trailer,
                        )
                        for c in values:
                            yield c, args
Example #7
    def __init__(self,
                 config,
                 parent_indentation,
                 containing_leaf,
                 spacing,
                 parent=None):
        expr_stmt = search_ancestor(containing_leaf, 'expr_stmt')
        if expr_stmt is not None:
            equals = expr_stmt.children[-2]

            if '\t' in config.indentation:
                # TODO unite with the code of BracketNode
                self.indentation = None
            else:
                # If the backslash follows the equals, use normal indentation
                # otherwise it should align with the equals.
                if equals.end_pos == spacing.start_pos:
                    self.indentation = parent_indentation + config.indentation
                else:
                    # +1 because there is a space.
                    self.indentation = ' ' * (equals.end_pos[1] + 1)
        else:
            self.indentation = parent_indentation + config.indentation
        self.bracket_indentation = self.indentation
        self.parent = parent
Example #8
    def _get_class_context_completions(self, is_function=True):
        """
        Autocomplete inherited methods when overriding in child class.
        """
        leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True)
        cls = tree.search_ancestor(leaf, 'classdef')
        if isinstance(cls, (tree.Class, tree.Function)):
            # Complete the methods that are defined in the super classes.
            random_context = self._module_context.create_context(
                cls,
                node_is_context=True
            )
        else:
            return

        if cls.start_pos[1] >= leaf.start_pos[1]:
            return

        filters = random_context.get_filters(search_global=False, is_instance=True)
        # The first dict is the dictionary of the class itself.
        next(filters)
        for filter in filters:
            for name in filter.values():
                # TODO we should probably check here for properties
                if (name.api_type == 'function') == is_function:
                    yield name
Example #9
    def _follow_error_node_imports_if_possible(self, context, name):
        error_node = tree.search_ancestor(name, 'error_node')
        if error_node is not None:
            # Get the first command start of a started simple_stmt. The error
            # node is sometimes a small_stmt and sometimes a simple_stmt. Check
            # for ; leaves that start a new statement.
            start_index = 0
            for index, n in enumerate(error_node.children):
                if n.start_pos > name.start_pos:
                    break
                if n == ';':
                    start_index = index + 1
            nodes = error_node.children[start_index:]
            first_name = nodes[0].get_first_leaf().value

            # Make it possible to infer stuff like `import foo.` or
            # `from foo.bar`.
            if first_name in ('from', 'import'):
                is_import_from = first_name == 'from'
                level, names = helpers.parse_dotted_names(
                    nodes,
                    is_import_from=is_import_from,
                    until_node=name,
                )
                return imports.Importer(self, names, context.get_root_context(), level).follow()
        return None
Example #10
    def _get_class_context_completions(self, is_function=True):
        """
        Autocomplete inherited methods when overriding in child class.
        """
        leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True)
        cls = tree.search_ancestor(leaf, 'classdef')
        if isinstance(cls, (tree.Class, tree.Function)):
            # Complete the methods that are defined in the super classes.
            random_context = self._module_context.create_context(
                cls,
                node_is_context=True
            )
        else:
            return

        if cls.start_pos[1] >= leaf.start_pos[1]:
            return

        filters = random_context.get_filters(search_global=False, is_instance=True)
        # The first dict is the dictionary of the class itself.
        next(filters)
        for filter in filters:
            for name in filter.values():
                if (name.api_type == 'function') == is_function:
                    yield name
Example #11
 def _is_in_right_scope(self, name):
     base = name
     hit_funcdef = False
     while True:
         base = search_ancestor(base, 'funcdef', 'classdef', 'lambdef')
         if base is self._parser_scope:
             return hit_funcdef
         hit_funcdef = True
Example #12
def _any_fstring_error(version, node):
    if version < (3, 9) or node is None:
        return False
    if node.type == "error_node":
        return any(child.type == "fstring_start" for child in node.children)
    elif node.type == "fstring":
        return True
    else:
        return search_ancestor(node, "fstring")
Example #13
 def search_all_comp_ancestors(node):
     has_ancestors = False
     while True:
         node = search_ancestor(node, 'testlist_comp', 'dictorsetmaker')
         if node is None:
             break
         for child in node.children:
             if child.type in _COMP_FOR_TYPES:
                 process_comp_for(child)
                 has_ancestors = True
                 break
     return has_ancestors
Example #14
def _is_a_pytest_param(param_name):
    """
    Pytest params are either in a `test_*` function or have a pytest fixture
    with the decorator @pytest.fixture.

    This is a heuristic and will work in most cases.
    """
    funcdef = search_ancestor(param_name.tree_name, 'funcdef')
    if funcdef is None:  # A lambda
        return False
    decorators = funcdef.get_decorators()
    return _is_pytest_func(funcdef.name.value, decorators)
Example #15
    def get_yield_lazy_contexts(self, is_async=False):
        # TODO: if is_async, wrap yield statements in Awaitable/async_generator_asend
        for_parents = [(y,
                        tree.search_ancestor(y, 'for_stmt', 'funcdef',
                                             'while_stmt', 'if_stmt'))
                       for y in get_yield_exprs(self.evaluator, self.tree_node)
                       ]

        # Calculate if the yields are placed within the same for loop.
        yields_order = []
        last_for_stmt = None
        for yield_, for_stmt in for_parents:
            # For really simple for loops we can predict the order. Otherwise
            # we just ignore it.
            parent = for_stmt.parent
            if parent.type == 'suite':
                parent = parent.parent
            if for_stmt.type == 'for_stmt' and parent == self.tree_node \
                    and parser_utils.for_stmt_defines_one_name(for_stmt):  # Simplicity for now.
                if for_stmt == last_for_stmt:
                    yields_order[-1][1].append(yield_)
                else:
                    yields_order.append((for_stmt, [yield_]))
            elif for_stmt == self.tree_node:
                yields_order.append((None, [yield_]))
            else:
                types = self.get_return_values(check_yields=True)
                if types:
                    yield LazyKnownContexts(types)
                return
            last_for_stmt = for_stmt

        for for_stmt, yields in yields_order:
            if for_stmt is None:
                # No for_stmt, just normal yields.
                for yield_ in yields:
                    for result in self._get_yield_lazy_context(yield_):
                        yield result
            else:
                input_node = for_stmt.get_testlist()
                cn = ContextualizedNode(self, input_node)
                ordered = cn.infer().iterate(cn)
                ordered = list(ordered)
                for lazy_context in ordered:
                    dct = {
                        str(for_stmt.children[1].value): lazy_context.infer()
                    }
                    with helpers.predefine_names(self, for_stmt, dct):
                        for yield_in_same_for_stmt in yields:
                            for result in self._get_yield_lazy_context(
                                    yield_in_same_for_stmt):
                                yield result
Example #16
    def description(self):
        """
        A description of the :class:`.Definition` object, which is heavily used
        in testing. e.g. for ``isinstance`` it returns ``def isinstance``.

        Example:

        >>> from jedi._compatibility import no_unicode_pprint
        >>> from jedi import Script
        >>> source = '''
        ... def f():
        ...     pass
        ...
        ... class C:
        ...     pass
        ...
        ... variable = f if random.choice([0,1]) else C'''
        >>> script = Script(source, column=3)  # line is maximum by default
        >>> defs = script.goto_definitions()
        >>> defs = sorted(defs, key=lambda d: d.line)
        >>> no_unicode_pprint(defs)  # doctest: +NORMALIZE_WHITESPACE
        [<Definition full_name='__main__.f', description='def f'>,
         <Definition full_name='__main__.C', description='class C'>]
        >>> str(defs[0].description)  # strip literals in python2
        'def f'
        >>> str(defs[1].description)
        'class C'

        """
        typ = self.type
        tree_name = self._name.tree_name
        if typ in ('function', 'class', 'module', 'instance') or tree_name is None:
            if typ == 'function':
                # For the description we want a short and a pythonic way.
                typ = 'def'
            return typ + ' ' + self._name.string_name
        elif typ == 'param':
            code = search_ancestor(tree_name, 'param').get_code(
                include_prefix=False,
                include_comma=False
            )
            return typ + ' ' + code

        definition = tree_name.get_definition() or tree_name
        # Remove the prefix, because that's not what we want for get_code
        # here.
        txt = definition.get_code(include_prefix=False)
        # Delete comments:
        txt = re.sub(r'#[^\n]+\n', ' ', txt)
        # Delete multi spaces/newlines
        txt = re.sub(r'\s+', ' ', txt).strip()
        return txt
Example #17
    def description(self):
        """
        A description of the :class:`.Definition` object, which is heavily used
        in testing. e.g. for ``isinstance`` it returns ``def isinstance``.

        Example:

        >>> from jedi import Script
        >>> source = '''
        ... def f():
        ...     pass
        ...
        ... class C:
        ...     pass
        ...
        ... variable = f if random.choice([0,1]) else C'''
        >>> script = Script(source, column=3)  # line is maximum by default
        >>> defs = script.goto_definitions()
        >>> defs = sorted(defs, key=lambda d: d.line)
        >>> defs
        [<Definition def f>, <Definition class C>]
        >>> str(defs[0].description)  # strip literals in python2
        'def f'
        >>> str(defs[1].description)
        'class C'

        """
        typ = self.type
        tree_name = self._name.tree_name
        if typ in ('function', 'class', 'module', 'instance') or tree_name is None:
            if typ == 'function':
                # For the description we want a short and a pythonic way.
                typ = 'def'
            return typ + ' ' + u(self._name.string_name)
        elif typ == 'param':
            code = search_ancestor(tree_name, 'param').get_code(
                include_prefix=False,
                include_comma=False
            )
            return typ + ' ' + code


        definition = tree_name.get_definition() or tree_name
        # Remove the prefix, because that's not what we want for get_code
        # here.
        txt = definition.get_code(include_prefix=False)
        # Delete comments:
        txt = re.sub(r'#[^\n]+\n', ' ', txt)
        # Delete multi spaces/newlines
        txt = re.sub(r'\s+', ' ', txt).strip()
        return txt
Example #18
def _eval_expr_stmt(context, stmt, seek_name=None):
    """
    The starting point of the completion. A statement always owns a call
    list, i.e. the calls that the statement makes. In case multiple
    names are defined in the statement, `seek_name` returns the result for
    this name.

    :param stmt: A `tree.ExprStmt`.
    """
    debug.dbg('eval_expr_stmt %s (%s)', stmt, seek_name)
    rhs = stmt.get_rhs()
    context_set = context.eval_node(rhs)

    if seek_name:
        c_node = ContextualizedName(context, seek_name)
        context_set = check_tuple_assignments(context.evaluator, c_node,
                                              context_set)

    first_operator = next(stmt.yield_operators(), None)
    if first_operator not in ('=', None) and first_operator.type == 'operator':
        # `=` is always the last character in aug assignments -> -1
        operator = copy.copy(first_operator)
        operator.value = operator.value[:-1]
        name = stmt.get_defined_names()[0].value
        left = context.py__getattribute__(name,
                                          position=stmt.start_pos,
                                          search_global=True)

        for_stmt = tree.search_ancestor(stmt, 'for_stmt')
        if for_stmt is not None and for_stmt.type == 'for_stmt' and context_set \
                and parser_utils.for_stmt_defines_one_name(for_stmt):
            # Iterate through result and add the values, that's possible
            # only in for loops without clutter, because they are
            # predictable. Also only do it, if the variable is not a tuple.
            node = for_stmt.get_testlist()
            cn = ContextualizedNode(context, node)
            ordered = list(cn.infer().iterate(cn))

            for lazy_context in ordered:
                dct = {for_stmt.children[1].value: lazy_context.infer()}
                with helpers.predefine_names(context, for_stmt, dct):
                    t = context.eval_node(rhs)
                    left = _eval_comparison(context.evaluator, context, left,
                                            operator, t)
            context_set = left
        else:
            context_set = _eval_comparison(context.evaluator, context, left,
                                           operator, context_set)
    debug.dbg('eval_expr_stmt result %s', context_set)
    return context_set
Example #19
def get_call_signature_param_names(call_signatures):
    # add named params
    for call_sig in call_signatures:
        for p in call_sig.params:
            # Allow protected access, because it's a public API.
            tree_name = p._name.tree_name
            # Compiled modules typically don't allow keyword arguments.
            if tree_name is not None:
                # Allow access on _definition here, because it's a
                # public API and we don't want to make the internal
                # Name object public.
                tree_param = tree.search_ancestor(tree_name, 'param')
                if tree_param.star_count == 0:  # no *args/**kwargs
                    yield p._name
Example #20
 def create_instance_context(self, class_context, node):
     new = node
     while True:
         func_node = new
         new = search_ancestor(new, 'funcdef', 'classdef')
         if class_context.tree_node is new:
             func = FunctionValue.from_context(class_context, func_node)
             bound_method = BoundMethod(self, func)
             if func_node.name.value == '__init__':
                 context = bound_method.as_context(self._arguments)
             else:
                 context = bound_method.as_context()
             break
     return context.create_context(node)
Example #21
def _is_annotation_name(name):
    ancestor = tree.search_ancestor(name, 'param', 'funcdef', 'expr_stmt')
    if ancestor is None:
        return False

    if ancestor.type in ('param', 'funcdef'):
        ann = ancestor.annotation
        if ann is not None:
            return ann.start_pos <= name.start_pos < ann.end_pos
    elif ancestor.type == 'expr_stmt':
        c = ancestor.children
        if len(c) > 1 and c[1].type == 'annassign':
            return c[1].start_pos <= name.start_pos < c[1].end_pos
    return False
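
As the None checks throughout these examples suggest, search_ancestor returns None when no ancestor of the requested types exists. A minimal, standalone sketch, again assuming parso is installed and exposes search_ancestor as a module-level function:

import parso
from parso.python.tree import search_ancestor

module = parso.parse("x = 1\n")
name = module.get_first_leaf()                       # the name `x`
print(search_ancestor(name, 'funcdef'))              # None: `x` is at module level
print(search_ancestor(name, 'expr_stmt').type)       # expr_stmt
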
Example #22
    def get_yield_lazy_contexts(self, is_async=False):
        # TODO: if is_async, wrap yield statements in Awaitable/async_generator_asend
        for_parents = [(y, tree.search_ancestor(y, 'for_stmt', 'funcdef',
                                                'while_stmt', 'if_stmt'))
                       for y in get_yield_exprs(self.evaluator, self.tree_node)]

        # Calculate if the yields are placed within the same for loop.
        yields_order = []
        last_for_stmt = None
        for yield_, for_stmt in for_parents:
            # For really simple for loops we can predict the order. Otherwise
            # we just ignore it.
            parent = for_stmt.parent
            if parent.type == 'suite':
                parent = parent.parent
            if for_stmt.type == 'for_stmt' and parent == self.tree_node \
                    and parser_utils.for_stmt_defines_one_name(for_stmt):  # Simplicity for now.
                if for_stmt == last_for_stmt:
                    yields_order[-1][1].append(yield_)
                else:
                    yields_order.append((for_stmt, [yield_]))
            elif for_stmt == self.tree_node:
                yields_order.append((None, [yield_]))
            else:
                types = self.get_return_values(check_yields=True)
                if types:
                    yield LazyKnownContexts(types)
                return
            last_for_stmt = for_stmt

        for for_stmt, yields in yields_order:
            if for_stmt is None:
                # No for_stmt, just normal yields.
                for yield_ in yields:
                    for result in self._get_yield_lazy_context(yield_):
                        yield result
            else:
                input_node = for_stmt.get_testlist()
                cn = ContextualizedNode(self, input_node)
                ordered = cn.infer().iterate(cn)
                ordered = list(ordered)
                for lazy_context in ordered:
                    dct = {str(for_stmt.children[1].value): lazy_context.infer()}
                    with helpers.predefine_names(self, for_stmt, dct):
                        for yield_in_same_for_stmt in yields:
                            for result in self._get_yield_lazy_context(yield_in_same_for_stmt):
                                yield result
Example #23
def _eval_expr_stmt(context, stmt, seek_name=None):
    """
    The starting point of the completion. A statement always owns a call
    list, i.e. the calls that the statement makes. In case multiple
    names are defined in the statement, `seek_name` returns the result for
    this name.

    :param stmt: A `tree.ExprStmt`.
    """
    debug.dbg('eval_expr_stmt %s (%s)', stmt, seek_name)
    rhs = stmt.get_rhs()
    context_set = context.eval_node(rhs)

    if seek_name:
        c_node = ContextualizedName(context, seek_name)
        context_set = check_tuple_assignments(context.evaluator, c_node, context_set)

    first_operator = next(stmt.yield_operators(), None)
    if first_operator not in ('=', None) and first_operator.type == 'operator':
        # `=` is always the last character in aug assignments -> -1
        operator = copy.copy(first_operator)
        operator.value = operator.value[:-1]
        name = stmt.get_defined_names()[0].value
        left = context.py__getattribute__(
            name, position=stmt.start_pos, search_global=True)

        for_stmt = tree.search_ancestor(stmt, 'for_stmt')
        if for_stmt is not None and for_stmt.type == 'for_stmt' and context_set \
                and parser_utils.for_stmt_defines_one_name(for_stmt):
            # Iterate through result and add the values, that's possible
            # only in for loops without clutter, because they are
            # predictable. Also only do it, if the variable is not a tuple.
            node = for_stmt.get_testlist()
            cn = ContextualizedNode(context, node)
            ordered = list(cn.infer().iterate(cn))

            for lazy_context in ordered:
                dct = {for_stmt.children[1].value: lazy_context.infer()}
                with helpers.predefine_names(context, for_stmt, dct):
                    t = context.eval_node(rhs)
                    left = _eval_comparison(context.evaluator, context, left, operator, t)
            context_set = left
        else:
            context_set = _eval_comparison(context.evaluator, context, left, operator, context_set)
    debug.dbg('eval_expr_stmt result %s', context_set)
    return context_set
Example #24
    def is_issue(self, node):
        if node.parent.type == 'testlist_comp':
            # [*[] for a in [1]]
            if node.parent.children[1].type in _COMP_FOR_TYPES:
                self.add_issue(node, message=self.message_iterable_unpacking)
        if self._normalizer.version <= (3, 4):
            n = search_ancestor(node, 'for_stmt', 'expr_stmt')
            found_definition = False
            if n is not None:
                if n.type == 'expr_stmt':
                    exprs = _get_expr_stmt_definition_exprs(n)
                else:
                    exprs = _get_for_stmt_definition_exprs(n)
                if node in exprs:
                    found_definition = True

            if not found_definition:
                self.add_issue(node, message=self.message_assignment)
Example #25
    def is_issue(self, node):
        if node.parent.type not in _STAR_EXPR_PARENTS:
            return True
        if node.parent.type == 'testlist_comp':
            # [*[] for a in [1]]
            if node.parent.children[1].type == 'comp_for':
                self.add_issue(node, message=self.message_iterable_unpacking)
        if self._normalizer.version <= (3, 4):
            n = search_ancestor(node, 'for_stmt', 'expr_stmt')
            found_definition = False
            if n is not None:
                if n.type == 'expr_stmt':
                    exprs = _get_expr_stmt_definition_exprs(n)
                else:
                    exprs = _get_for_stmt_definition_exprs(n)
                if node in exprs:
                    found_definition = True

            if not found_definition:
                self.add_issue(node, message=self.message_assignment)
Example #26
    def __init__(self, config, parent_indentation, containing_leaf, spacing, parent=None):
        expr_stmt = search_ancestor(containing_leaf, 'expr_stmt')
        if expr_stmt is not None:
            equals = expr_stmt.children[-2]

            if '\t' in config.indentation:
                # TODO unite with the code of BracketNode
                self.indentation = None
            else:
                # If the backslash follows the equals, use normal indentation
                # otherwise it should align with the equals.
                if equals.end_pos == spacing.start_pos:
                    self.indentation = parent_indentation + config.indentation
                else:
                    # +1 because there is a space.
                    self.indentation = ' ' * (equals.end_pos[1] + 1)
        else:
            self.indentation = parent_indentation + config.indentation
        self.bracket_indentation = self.indentation
        self.parent = parent
Example #27
def generate_docstring(source,
                       position=(1, 0),
                       formatter="google",
                       autocomplete=False):
    """Generate a docstring

    Args:
        source (str): the text of the source
        position (tuple): the position of the cursor in the source as (row, column). Rows start at 1;
            columns start at 0
        formatter (str): the format of the docstring; choose from google, numpy, or reST.
        autocomplete (bool): whether or not to remove three characters from before the position prior
            to parsing the code. This is to remove the \"\"\" before a docstring. Default: False

    Raises:
        exc.InvalidFormatter: If the value provided to `formatter` is not a supported
            formatter name

    Returns:
       str or None: docstring, excluding quotation marks, or None, if one could not be generated
    """
    if autocomplete:
        lines = source.splitlines(True)
        # all full lines before the one the position is on
        lines_before = lines[:position[0] - 1]
        # position in buffer is length of all those lines + the column position (starting at 0)
        bufferpos = sum(len(l) for l in lines_before) + position[1]
        # Splice the desired bits of the source together
        slice1 = source[:bufferpos - 3]
        slice2 = source[bufferpos:]
        source = slice1 + slice2
        # Shift the position to account for the removed quotes
        position = (position[0], position[1] - 3)

    tree = parso.parse(source)
    assert isinstance(tree, BaseNode)
    try:
        leaf = tree.get_leaf_for_position(position, include_prefixes=True)
    except ValueError as e:
        leaf = tree
    if not leaf:  # pragma: no cover
        raise exc.FailedToGenerateDocstringError(
            "Could not find leaf at cursor position {}".format(position))
    scopes = ('classdef', 'funcdef', 'file_input')
    scope = search_ancestor(leaf, *scopes)
    if not scope:
        if leaf.type == 'file_input':
            scope = leaf
        else:  # pragma: no cover
            raise exc.FailedToGenerateDocstringError(
                "Could not find scope of leaf {} ".format(leaf))

    formatter_module = formatters.get(formatter)
    if scope.type == 'classdef':
        return formatter_module.class_docstring(scope)
    elif scope.type == 'funcdef':
        return formatter_module.function_docstring(scope)
    elif scope.type == 'file_input':
        return formatter_module.module_docstring(scope)

    raise exc.FailedToGenerateDocstringError(
        "Failed to generate Docstring for: {}".format(
            scope))  # pragma: no cover
Example #28
def _infer_expr_stmt(context, stmt, seek_name=None):
    """
    The starting point of the completion. A statement always owns a call
    list, i.e. the calls that the statement makes. In case multiple
    names are defined in the statement, `seek_name` returns the result for
    this name.

    expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) |
                     ('=' (yield_expr|testlist_star_expr))*)
    annassign: ':' test ['=' test]
    augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
                '<<=' | '>>=' | '**=' | '//=')

    :param stmt: A `tree.ExprStmt`.
    """
    def check_setitem(stmt):
        atom_expr = stmt.children[0]
        if atom_expr.type not in ('atom_expr', 'power'):
            return False, None
        name = atom_expr.children[0]
        if name.type != 'name' or len(atom_expr.children) != 2:
            return False, None
        trailer = atom_expr.children[-1]
        return trailer.children[0] == '[', trailer.children[1]

    debug.dbg('infer_expr_stmt %s (%s)', stmt, seek_name)
    rhs = stmt.get_rhs()

    value_set = context.infer_node(rhs)

    if seek_name:
        n = TreeNameDefinition(context, seek_name)
        value_set = check_tuple_assignments(n, value_set)

    first_operator = next(stmt.yield_operators(), None)
    is_setitem, subscriptlist = check_setitem(stmt)
    is_annassign = first_operator not in (
        '=', None) and first_operator.type == 'operator'
    if is_annassign or is_setitem:
        # `=` is always the last character in aug assignments -> -1
        name = stmt.get_defined_names(include_setitem=True)[0].value
        left_values = context.py__getattribute__(name, position=stmt.start_pos)

        if is_setitem:

            def to_mod(v):
                c = ContextualizedSubscriptListNode(context, subscriptlist)
                if v.array_type == 'dict':
                    return DictModification(v, value_set, c)
                elif v.array_type == 'list':
                    return ListModification(v, value_set, c)
                return v

            value_set = ValueSet(to_mod(v) for v in left_values)
        else:
            operator = copy.copy(first_operator)
            operator.value = operator.value[:-1]
            for_stmt = tree.search_ancestor(stmt, 'for_stmt')
            if for_stmt is not None and for_stmt.type == 'for_stmt' and value_set \
                    and parser_utils.for_stmt_defines_one_name(for_stmt):
                # Iterate through result and add the values, that's possible
                # only in for loops without clutter, because they are
                # predictable. Also only do it, if the variable is not a tuple.
                node = for_stmt.get_testlist()
                cn = ContextualizedNode(context, node)
                ordered = list(cn.infer().iterate(cn))

                for lazy_value in ordered:
                    dct = {for_stmt.children[1].value: lazy_value.infer()}
                    with context.predefine_names(for_stmt, dct):
                        t = context.infer_node(rhs)
                        left_values = _infer_comparison(
                            context, left_values, operator, t)
                value_set = left_values
            else:
                value_set = _infer_comparison(context, left_values, operator,
                                              value_set)
    debug.dbg('infer_expr_stmt result %s', value_set)
    return value_set
Example #29
def infer_atom(context, atom):
    """
    Basically to process ``atom`` nodes. The parser sometimes doesn't
    generate the node (because it has just one child). In that case an atom
    might be a name or a literal as well.
    """
    state = context.inference_state
    if atom.type == 'name':
        # This is the first global lookup.
        stmt = tree.search_ancestor(atom, 'expr_stmt', 'lambdef',
                                    'if_stmt') or atom
        if stmt.type == 'if_stmt':
            if not any(n.start_pos <= atom.start_pos < n.end_pos
                       for n in stmt.get_test_nodes()):
                stmt = atom
        elif stmt.type == 'lambdef':
            stmt = atom
        position = stmt.start_pos
        if _is_annotation_name(atom):
            # Since Python 3.7 (with from __future__ import annotations),
            # annotations are essentially strings and can reference objects
            # that are defined further down in code. Therefore just set the
            # position to None, so the finder will not try to stop at a certain
            # position in the module.
            position = None
        return context.py__getattribute__(atom, position=position)
    elif atom.type == 'keyword':
        # For False/True/None
        if atom.value in ('False', 'True', 'None'):
            return ValueSet([compiled.builtin_from_name(state, atom.value)])
        elif atom.value == 'yield':
            # Contrary to yield from, yield can just appear alone to return a
            # value when used with `.send()`.
            return NO_VALUES
        assert False, 'Cannot infer the keyword %s' % atom

    elif isinstance(atom, tree.Literal):
        string = state.compiled_subprocess.safe_literal_eval(atom.value)
        return ValueSet([compiled.create_simple_object(state, string)])
    elif atom.type == 'strings':
        # Will be multiple strings.
        value_set = infer_atom(context, atom.children[0])
        for string in atom.children[1:]:
            right = infer_atom(context, string)
            value_set = _infer_comparison(context, value_set, '+', right)
        return value_set
    elif atom.type == 'fstring':
        return compiled.get_string_value_set(state)
    else:
        c = atom.children
        # Parentheses without commas are not tuples.
        if c[0] == '(' and not len(c) == 2 \
                and not(c[1].type == 'testlist_comp'
                        and len(c[1].children) > 1):
            return context.infer_node(c[1])

        try:
            comp_for = c[1].children[1]
        except (IndexError, AttributeError):
            pass
        else:
            if comp_for == ':':
                # Dict comprehensions have a colon at the 3rd index.
                try:
                    comp_for = c[1].children[3]
                except IndexError:
                    pass

            if comp_for.type in ('comp_for', 'sync_comp_for'):
                return ValueSet(
                    [iterable.comprehension_from_atom(state, context, atom)])

        # It's a dict/list/tuple literal.
        array_node = c[1]
        try:
            array_node_c = array_node.children
        except AttributeError:
            array_node_c = []
        if c[0] == '{' and (array_node == '}' or ':' in array_node_c
                            or '**' in array_node_c):
            new_value = iterable.DictLiteralValue(state, context, atom)
        else:
            new_value = iterable.SequenceLiteralValue(state, context, atom)
        return ValueSet([new_value])
Example #30
    def _goto(self, context, name):
        definition = name.get_definition(import_name_always=True)
        if definition is not None:
            type_ = definition.type
            if type_ == 'expr_stmt':
                # Only take the parent, because if it's more complicated than just
                # a name it's something you can "goto" again.
                is_simple_name = name.parent.type not in ('power', 'trailer')
                if is_simple_name:
                    return [TreeNameDefinition(context, name)]
            elif type_ == 'param':
                return [ParamName(context, name)]
            elif type_ in ('funcdef', 'classdef'):
                return [TreeNameDefinition(context, name)]
            elif type_ in ('import_from', 'import_name'):
                module_names = imports.infer_import(context, name, is_goto=True)
                return module_names
        else:
            contexts = self._follow_error_node_imports_if_possible(context, name)
            if contexts is not None:
                return [context.name for context in contexts]

        par = name.parent
        node_type = par.type
        if node_type == 'argument' and par.children[1] == '=' and par.children[0] == name:
            # Named param goto.
            trailer = par.parent
            if trailer.type == 'arglist':
                trailer = trailer.parent
            if trailer.type != 'classdef':
                if trailer.type == 'decorator':
                    context_set = context.eval_node(trailer.children[1])
                else:
                    i = trailer.parent.children.index(trailer)
                    to_evaluate = trailer.parent.children[:i]
                    if to_evaluate[0] == 'await':
                        to_evaluate.pop(0)
                    context_set = context.eval_node(to_evaluate[0])
                    for trailer in to_evaluate[1:]:
                        context_set = eval_trailer(context, context_set, trailer)
                param_names = []
                for context in context_set:
                    for signature in context.get_signatures():
                        for param_name in signature.get_param_names():
                            if param_name.string_name == name.value:
                                param_names.append(param_name)
                return param_names
        elif node_type == 'dotted_name':  # Is a decorator.
            index = par.children.index(name)
            if index > 0:
                new_dotted = helpers.deep_ast_copy(par)
                new_dotted.children[index - 1:] = []
                values = context.eval_node(new_dotted)
                return unite(
                    value.py__getattribute__(name, name_context=context, is_goto=True)
                    for value in values
                )

        if node_type == 'trailer' and par.children[0] == '.':
            values = helpers.evaluate_call_of_leaf(context, name, cut_own_trailer=True)
            return unite(
                value.py__getattribute__(name, name_context=context, is_goto=True)
                for value in values
            )
        else:
            stmt = tree.search_ancestor(
                name, 'expr_stmt', 'lambdef'
            ) or name
            if stmt.type == 'lambdef':
                stmt = name
            return context.py__getattribute__(
                name,
                position=stmt.start_pos,
                search_global=True, is_goto=True
            )
Example #31
    def eval_atom(self, context, atom):
        """
        Basically to process ``atom`` nodes. The parser sometimes doesn't
        generate the node (because it has just one child). In that case an atom
        might be a name or a literal as well.
        """
        if atom.type == 'name':
            # This is the first global lookup.
            stmt = tree.search_ancestor(
                atom, 'expr_stmt', 'lambdef'
            ) or atom
            if stmt.type == 'lambdef':
                stmt = atom
            return context.py__getattribute__(
                name_or_str=atom,
                position=stmt.start_pos,
                search_global=True
            )
        elif isinstance(atom, tree.Literal):
            string = parser_utils.safe_literal_eval(atom.value)
            return set([compiled.create(self, string)])
        else:
            c = atom.children
            if c[0].type == 'string':
                # Will be one string.
                types = self.eval_atom(context, c[0])
                for string in c[1:]:
                    right = self.eval_atom(context, string)
                    types = precedence.calculate(self, context, types, '+', right)
                return types
            # Parentheses without commas are not tuples.
            elif c[0] == '(' and not len(c) == 2 \
                    and not(c[1].type == 'testlist_comp' and
                            len(c[1].children) > 1):
                return self.eval_element(context, c[1])

            try:
                comp_for = c[1].children[1]
            except (IndexError, AttributeError):
                pass
            else:
                if comp_for == ':':
                    # Dict comprehensions have a colon at the 3rd index.
                    try:
                        comp_for = c[1].children[3]
                    except IndexError:
                        pass

                if comp_for.type == 'comp_for':
                    return set([iterable.Comprehension.from_atom(self, context, atom)])

            # It's a dict/list/tuple literal.
            array_node = c[1]
            try:
                array_node_c = array_node.children
            except AttributeError:
                array_node_c = []
            if c[0] == '{' and (array_node == '}' or ':' in array_node_c):
                context = iterable.DictLiteralContext(self, context, atom)
            else:
                context = iterable.SequenceLiteralContext(self, context, atom)
            return set([context])
Example #32
    def goto(self, context, name):
        definition = name.get_definition(import_name_always=True)
        if definition is not None:
            type_ = definition.type
            if type_ == 'expr_stmt':
                # Only take the parent, because if it's more complicated than just
                # a name it's something you can "goto" again.
                is_simple_name = name.parent.type not in ('power', 'trailer')
                if is_simple_name:
                    return [TreeNameDefinition(context, name)]
            elif type_ == 'param':
                return [ParamName(context, name)]
            elif type_ in ('funcdef', 'classdef'):
                return [TreeNameDefinition(context, name)]
            elif type_ in ('import_from', 'import_name'):
                module_names = imports.infer_import(context, name, is_goto=True)
                return module_names

        par = name.parent
        node_type = par.type
        if node_type == 'argument' and par.children[1] == '=' and par.children[0] == name:
            # Named param goto.
            trailer = par.parent
            if trailer.type == 'arglist':
                trailer = trailer.parent
            if trailer.type != 'classdef':
                if trailer.type == 'decorator':
                    context_set = context.eval_node(trailer.children[1])
                else:
                    i = trailer.parent.children.index(trailer)
                    to_evaluate = trailer.parent.children[:i]
                    if to_evaluate[0] == 'await':
                        to_evaluate.pop(0)
                    context_set = context.eval_node(to_evaluate[0])
                    for trailer in to_evaluate[1:]:
                        context_set = eval_trailer(context, context_set, trailer)
                param_names = []
                for context in context_set:
                    try:
                        get_param_names = context.get_param_names
                    except AttributeError:
                        pass
                    else:
                        for param_name in get_param_names():
                            if param_name.string_name == name.value:
                                param_names.append(param_name)
                return param_names
        elif node_type == 'dotted_name':  # Is a decorator.
            index = par.children.index(name)
            if index > 0:
                new_dotted = helpers.deep_ast_copy(par)
                new_dotted.children[index - 1:] = []
                values = context.eval_node(new_dotted)
                return unite(
                    value.py__getattribute__(name, name_context=context, is_goto=True)
                    for value in values
                )

        if node_type == 'trailer' and par.children[0] == '.':
            values = helpers.evaluate_call_of_leaf(context, name, cut_own_trailer=True)
            return unite(
                value.py__getattribute__(name, name_context=context, is_goto=True)
                for value in values
            )
        else:
            stmt = tree.search_ancestor(
                name, 'expr_stmt', 'lambdef'
            ) or name
            if stmt.type == 'lambdef':
                stmt = name
            return context.py__getattribute__(
                name,
                position=stmt.start_pos,
                search_global=True, is_goto=True
            )
Example #33
def eval_atom(context, atom):
    """
    Basically to process ``atom`` nodes. The parser sometimes doesn't
    generate the node (because it has just one child). In that case an atom
    might be a name or a literal as well.
    """
    if atom.type == 'name':
        # This is the first global lookup.
        stmt = tree.search_ancestor(atom, 'expr_stmt', 'lambdef') or atom
        if stmt.type == 'lambdef':
            stmt = atom
        return context.py__getattribute__(name_or_str=atom,
                                          position=stmt.start_pos,
                                          search_global=True)
    elif atom.type == 'keyword':
        # For False/True/None
        if atom.value in ('False', 'True', 'None'):
            return ContextSet(
                compiled.builtin_from_name(context.evaluator, atom.value))
        elif atom.value == 'print':
            # print e.g. could be evaluated like this in Python 2.7
            return NO_CONTEXTS
        elif atom.value == 'yield':
            # Contrary to yield from, yield can just appear alone to return a
            # value when used with `.send()`.
            return NO_CONTEXTS
        assert False, 'Cannot evaluate the keyword %s' % atom

    elif isinstance(atom, tree.Literal):
        string = context.evaluator.compiled_subprocess.safe_literal_eval(
            atom.value)
        return ContextSet(
            compiled.create_simple_object(context.evaluator, string))
    elif atom.type == 'strings':
        # Will be multiple strings.
        context_set = eval_atom(context, atom.children[0])
        for string in atom.children[1:]:
            right = eval_atom(context, string)
            context_set = _eval_comparison(context.evaluator, context,
                                           context_set, u'+', right)
        return context_set
    else:
        c = atom.children
        # Parentheses without commas are not tuples.
        if c[0] == '(' and not len(c) == 2 \
                and not(c[1].type == 'testlist_comp' and
                        len(c[1].children) > 1):
            return context.eval_node(c[1])

        try:
            comp_for = c[1].children[1]
        except (IndexError, AttributeError):
            pass
        else:
            if comp_for == ':':
                # Dict comprehensions have a colon at the 3rd index.
                try:
                    comp_for = c[1].children[3]
                except IndexError:
                    pass

            if comp_for.type == 'comp_for':
                return ContextSet(
                    iterable.comprehension_from_atom(context.evaluator,
                                                     context, atom))

        # It's a dict/list/tuple literal.
        array_node = c[1]
        try:
            array_node_c = array_node.children
        except AttributeError:
            array_node_c = []
        if c[0] == '{' and (array_node == '}' or ':' in array_node_c):
            context = iterable.DictLiteralContext(context.evaluator, context,
                                                  atom)
        else:
            context = iterable.SequenceLiteralContext(context.evaluator,
                                                      context, atom)
        return ContextSet(context)
Example #34
def eval_atom(context, atom):
    """
    Basically to process ``atom`` nodes. The parser sometimes doesn't
    generate the node (because it has just one child). In that case an atom
    might be a name or a literal as well.
    """
    if atom.type == 'name':
        # This is the first global lookup.
        stmt = tree.search_ancestor(atom, 'expr_stmt', 'lambdef') or atom
        if stmt.type == 'lambdef':
            stmt = atom
        return context.py__getattribute__(name_or_str=atom,
                                          position=stmt.start_pos,
                                          search_global=True)

    elif isinstance(atom, tree.Literal):
        string = parser_utils.safe_literal_eval(atom.value)
        return ContextSet(compiled.create(context.evaluator, string))
    else:
        c = atom.children
        if c[0].type == 'string':
            # Will be one string.
            context_set = eval_atom(context, c[0])
            for string in c[1:]:
                right = eval_atom(context, string)
                context_set = _eval_comparison(context.evaluator, context,
                                               context_set, '+', right)
            return context_set
        # Parentheses without commas are not tuples.
        elif c[0] == '(' and not len(c) == 2 \
                and not(c[1].type == 'testlist_comp' and
                        len(c[1].children) > 1):
            return context.eval_node(c[1])

        try:
            comp_for = c[1].children[1]
        except (IndexError, AttributeError):
            pass
        else:
            if comp_for == ':':
                # Dict comprehensions have a colon at the 3rd index.
                try:
                    comp_for = c[1].children[3]
                except IndexError:
                    pass

            if comp_for.type == 'comp_for':
                return ContextSet(
                    iterable.Comprehension.from_atom(context.evaluator,
                                                     context, atom))

        # It's a dict/list/tuple literal.
        array_node = c[1]
        try:
            array_node_c = array_node.children
        except AttributeError:
            array_node_c = []
        if c[0] == '{' and (array_node == '}' or ':' in array_node_c):
            context = iterable.DictLiteralContext(context.evaluator, context,
                                                  atom)
        else:
            context = iterable.SequenceLiteralContext(context.evaluator,
                                                      context, atom)
        return ContextSet(context)
Example #35
def eval_atom(context, atom):
    """
    Basically to process ``atom`` nodes. The parser sometimes doesn't
    generate the node (because it has just one child). In that case an atom
    might be a name or a literal as well.
    """
    if atom.type == 'name':
        # This is the first global lookup.
        stmt = tree.search_ancestor(
            atom, 'expr_stmt', 'lambdef'
        ) or atom
        if stmt.type == 'lambdef':
            stmt = atom
        return context.py__getattribute__(
            name_or_str=atom,
            position=stmt.start_pos,
            search_global=True
        )
    elif atom.type == 'keyword':
        # For False/True/None
        if atom.value in ('False', 'True', 'None'):
            return ContextSet(compiled.builtin_from_name(context.evaluator, atom.value))
        elif atom.value == 'print':
            # In Python 2.7, `print` can appear as a bare keyword like this.
            return NO_CONTEXTS
        elif atom.value == 'yield':
            # Unlike `yield from`, a bare `yield` may appear alone; the
            # expression evaluates to whatever is passed in via `.send()`.
            return NO_CONTEXTS
        assert False, 'Cannot evaluate the keyword %s' % atom

    elif isinstance(atom, tree.Literal):
        string = context.evaluator.compiled_subprocess.safe_literal_eval(atom.value)
        return ContextSet(compiled.create_simple_object(context.evaluator, string))
    elif atom.type == 'strings':
        # Will be multiple adjacent strings; concatenate them.
        context_set = eval_atom(context, atom.children[0])
        for string in atom.children[1:]:
            right = eval_atom(context, string)
            context_set = _eval_comparison(context.evaluator, context, context_set, u'+', right)
        return context_set
    else:
        c = atom.children
        # Parentheses without commas are not tuples.
        if c[0] == '(' and not len(c) == 2 \
                and not(c[1].type == 'testlist_comp' and
                        len(c[1].children) > 1):
            return context.eval_node(c[1])

        try:
            comp_for = c[1].children[1]
        except (IndexError, AttributeError):
            pass
        else:
            if comp_for == ':':
                # In dict comprehensions the colon sits at index 1 and the
                # comp_for at index 3.
                try:
                    comp_for = c[1].children[3]
                except IndexError:
                    pass

            if comp_for.type == 'comp_for':
                return ContextSet(iterable.comprehension_from_atom(
                    context.evaluator, context, atom
                ))

        # It's a dict/list/tuple literal.
        array_node = c[1]
        try:
            array_node_c = array_node.children
        except AttributeError:
            array_node_c = []
        if c[0] == '{' and (array_node == '}' or ':' in array_node_c):
            context = iterable.DictLiteralContext(context.evaluator, context, atom)
        else:
            context = iterable.SequenceLiteralContext(context.evaluator, context, atom)
        return ContextSet(context)
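
Both variants detect comprehensions by looking for a comp_for node at index 1 of the bracketed child, or at index 3 when a dict comprehension puts a key and a colon in front of it. The sketch below, which assumes only parso, prints the actual child layout; note that recent parso grammars name the node sync_comp_for rather than comp_for, which is why the sketch prints the types instead of asserting them.

import parso

# Sketch: child layout the comprehension detection relies on.
# List/set comprehensions carry the comp_for at index 1, dict
# comprehensions at index 3 (after key, ':' and value).
for source in ("[x for x in y]", "{k: v for k, v in y}"):
    atom = parso.parse(source + "\n").children[0].children[0]
    inner = atom.children[1]
    print(source, '->', [child.type for child in inner.children])
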
Example #36
0
    def _check_assignment(self, node, is_deletion=False, is_namedexpr=False,
                          is_aug_assign=False):
        error = None
        type_ = node.type
        if type_ == 'lambdef':
            error = 'lambda'
        elif type_ == 'atom':
            first, second = node.children[:2]
            error = _get_comprehension_type(node)
            if error is None:
                if second.type == 'dictorsetmaker':
                    if self._normalizer.version < (3, 8):
                        error = 'literal'
                    else:
                        if second.children[1] == ':':
                            error = 'dict display'
                        else:
                            error = 'set display'
                elif first == "{" and second == "}":
                    if self._normalizer.version < (3, 8):
                        error = 'literal'
                    else:
                        error = "dict display"
                elif first == "{" and len(node.children) > 2:
                    if self._normalizer.version < (3, 8):
                        error = 'literal'
                    else:
                        error = "set display"
                elif first in ('(', '['):
                    if second.type == 'yield_expr':
                        error = 'yield expression'
                    elif second.type == 'testlist_comp':
                        # ([a, b] := [1, 2])
                        # ((a, b) := [1, 2])
                        if is_namedexpr:
                            if first == '(':
                                error = 'tuple'
                            elif first == '[':
                                error = 'list'

                        # This is not a comprehension; comprehensions were
                        # handled further above.
                        for child in second.children[::2]:
                            self._check_assignment(child, is_deletion,
                                                   is_namedexpr, is_aug_assign)
                    else:  # Everything handled, must be useless brackets.
                        self._check_assignment(second, is_deletion,
                                               is_namedexpr, is_aug_assign)
        elif type_ == 'keyword':
            if node.value == "yield":
                error = "yield expression"
            elif self._normalizer.version < (3, 8):
                error = 'keyword'
            else:
                error = str(node.value)
        elif type_ == 'operator':
            if node.value == '...':
                error = 'Ellipsis'
        elif type_ == 'comparison':
            error = 'comparison'
        elif type_ in ('string', 'number', 'strings'):
            error = 'literal'
        elif type_ == 'yield_expr':
            # This one seems to be a slightly different warning in Python.
            message = 'assignment to yield expression not possible'
            self.add_issue(node, message=message)
        elif type_ == 'test':
            error = 'conditional expression'
        elif type_ in ('atom_expr', 'power'):
            if node.children[0] == 'await':
                error = 'await expression'
            elif node.children[-2] == '**':
                error = 'operator'
            else:
                # Has a trailer
                trailer = node.children[-1]
                assert trailer.type == 'trailer'
                if trailer.children[0] == '(':
                    error = 'function call'
                elif is_namedexpr and trailer.children[0] == '[':
                    error = 'subscript'
                elif is_namedexpr and trailer.children[0] == '.':
                    error = 'attribute'
        elif type_ == "fstring":
            if self._normalizer.version < (3, 8):
                error = 'literal'
            else:
                error = "f-string expression"
        elif type_ in ('testlist_star_expr', 'exprlist', 'testlist'):
            for child in node.children[::2]:
                self._check_assignment(child, is_deletion, is_namedexpr,
                                       is_aug_assign)
        elif ('expr' in type_ and type_ != 'star_expr'  # is a substring
              or '_test' in type_ or type_ in ('term', 'factor')):
            error = 'operator'
        elif type_ == "star_expr":
            if is_deletion:
                if self._normalizer.version >= (3, 9):
                    error = "starred"
                else:
                    self.add_issue(node,
                                   message="can't use starred expression here")
            elif (not search_ancestor(node, *_STAR_EXPR_PARENTS)
                  and not is_aug_assign):
                self.add_issue(
                    node,
                    message="starred assignment target must be in a list or tuple")

            self._check_assignment(node.children[1])

        if error is not None:
            if is_namedexpr:
                message = 'cannot use assignment expressions with %s' % error
            else:
                cannot = ("can't" if self._normalizer.version < (3, 8)
                          else "cannot")
                message = ' '.join(
                    [cannot, "delete" if is_deletion else "assign to", error])
            self.add_issue(node, message=message)
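
Because _check_assignment only builds the error wording and hands it to add_issue, the simplest way to see its effect is through parso's public error-reporting API. A minimal, hedged sketch follows; the exact message text depends on the grammar version, so it just prints whatever iter_errors reports.

import parso

# Sketch: surface the "cannot assign to ..." / deletion messages that
# _check_assignment produces, via parso's public API.
grammar = parso.load_grammar()
for source in ("1 = x\n", "f() = x\n", "(a, 1) = x\n", "del *a\n"):
    module = grammar.parse(source)
    for issue in grammar.iter_errors(module):
        print(repr(source), '->', issue.message)
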