Ejemplo n.º 1
0
def find_token(ast, line, col) -> Optional[TokenInfo]:
    """
    Find the token at the given position in the AST.

    :param ast: AST whose nodes (visited through `_iter_nodes`) may expose
        a `.tokens` attribute.
    :param line: 0-based line of the position.
    :param col: 0-based column of the position.
    :return: a TokenInfo for the matching token or None if no token covers
        the position.
    """
    for stack, node in _iter_nodes(ast):
        try:
            tokens = node.tokens
        except AttributeError:
            # Not every node carries tokens; skip the ones that don't.
            continue

        # Last token already examined on the requested line (tokens on other
        # lines never update it because of the `continue` below). Used to
        # decide whether trailing whitespace in an EOL token should be
        # folded into the preceding token.
        last_token = None
        for token in tokens:
            # token.lineno is 1-based; normalize to compare with 0-based `line`.
            lineno = token.lineno - 1
            if lineno != line:
                continue

            if token.type == token.SEPARATOR:
                # For separator tokens, it must be entirely within the section
                # i.e.: if it's in the boundary for a word, we want the word,
                # not the separator.
                if token.col_offset < col < token.end_col_offset:
                    return TokenInfo(tuple(stack), node, token)

            elif token.type == token.EOL:
                # A trailing whitespace after a keyword should be part of
                # the keyword, not EOL.
                if token.col_offset <= col <= token.end_col_offset:
                    diff = col - token.col_offset
                    if last_token is not None and not token.value.strip():
                        # Only merge when the cursor is at most one character
                        # into the (whitespace-only) EOL token.
                        eol_contents = token.value[:diff]
                        if len(eol_contents) <= 1:
                            token = _append_eol_to_prev_token(
                                last_token, eol_contents)

                    return TokenInfo(tuple(stack), node, token)

            else:
                # Regular tokens match inclusively on both boundaries.
                if token.col_offset <= col <= token.end_col_offset:
                    return TokenInfo(tuple(stack), node, token)

            last_token = token

    return None
Ejemplo n.º 2
0
def find_token(ast, line, col) -> Optional[TokenInfo]:
    """
    Search the AST for the token covering the given position.

    :param ast: AST whose nodes (visited through `_iter_nodes`) may expose
        a `.tokens` attribute.
    :param line: 0-based line of the position.
    :param col: 0-based column of the position.
    :return: a TokenInfo for the matching token or None if nothing matches.
    """
    for stack, node in _iter_nodes(ast):
        if not hasattr(node, "tokens"):
            # Nodes without tokens can never contain the position.
            continue

        for tok in node.tokens:
            # tok.lineno is 1-based; `line` is 0-based.
            if tok.lineno - 1 != line:
                continue

            if tok.type == tok.SEPARATOR:
                # A separator only matches when the position is strictly
                # inside it: on a boundary shared with a word we want the
                # word, not the separator.
                hit = tok.col_offset < col < tok.end_col_offset
            else:
                # Other tokens match inclusively on both boundaries.
                hit = tok.col_offset <= col <= tok.end_col_offset

            if hit:
                return TokenInfo(tuple(stack), node, tok)

    return None
Ejemplo n.º 3
0
def find_variable(ast, line, col) -> Optional[TokenInfo]:
    """
    Find the variable part under the given position, if any.

    :param ast: AST to search.
    :param line: 0-based line of the position.
    :param col: 0-based column of the position.
    :return: a TokenInfo whose token is the VARIABLE part under the
        position, or None if the position isn't on a variable.
    """
    token_info = find_token(ast, line, col)
    if token_info is None:
        return None

    token = token_info.token
    if "{" not in token.value:
        # No brace means the token cannot contain a variable reference.
        return None

    for part in _tokenize_variables_even_when_invalid(token, col):
        if not (part.col_offset <= col <= part.end_col_offset):
            continue
        if part.type == part.VARIABLE:
            return TokenInfo(token_info.stack, token_info.node, part)
        # Position is inside a non-variable part: no match.
        return None

    return None
Ejemplo n.º 4
0
def iter_variable_assigns(ast) -> Iterator:
    """
    Yield a TokenInfo for each assign target of every KeywordCall node.

    Any text after the last closing '}' in the assign token's value (such
    as a trailing ' =' marker) is stripped from the yielded token.
    """
    from robot.api import Token

    for stack, node in _iter_nodes(ast, recursive=False):
        if node.__class__.__name__ != "KeywordCall":
            continue

        for assign_token in node.get_tokens(Token.ASSIGN):
            text = assign_token.value
            close_brace = text.rfind("}")
            if close_brace > 0:
                # Rebuild the token with everything past the last '}'
                # removed (keeps lineno/col_offset of the original).
                assign_token = Token(
                    type=assign_token.type,
                    value=text[: close_brace + 1],
                    lineno=assign_token.lineno,
                    col_offset=assign_token.col_offset,
                    error=assign_token.error,
                )

            yield TokenInfo(tuple(stack), node, assign_token)
Ejemplo n.º 5
0
    def get_current_keyword_definition_and_usage_info(
        self
    ) -> Optional[Tuple[IKeywordDefinition, KeywordUsageInfo]]:
        """
        Provides the current keyword even if we're in its arguments and not actually
        on the keyword itself.

        :return: a (definition, usage_info) tuple for the keyword related to
            the current selection, or None when it cannot be determined.
        """
        from robotframework_ls.impl.find_definition import find_keyword_definition
        from robotframework_ls.impl import ast_utils

        token_info = self.get_current_token()
        if token_info is None:
            return None

        cp: ICompletionContext = self
        # If the selection landed on an EOL token, walk back one column at a
        # time until a real token is found (or the start of the line is hit).
        # Fix: get_current_token() may return None while walking back;
        # previously the loop condition then raised AttributeError.
        while token_info is not None and token_info.token.type == token_info.token.EOL:
            sel = cp.sel
            if sel.col <= 0:
                break
            cp = cp.create_copy_with_selection(sel.line, sel.col - 1)
            token_info = cp.get_current_token()

        if token_info is None:
            return None

        usage_info = ast_utils.create_keyword_usage_info(
            token_info.stack, token_info.node
        )
        if usage_info is None:
            return None

        token = usage_info.token

        # token line is 1-based and col is 0-based (make both 0-based here).
        line = token.lineno - 1
        col = token.col_offset
        cp = cp.create_copy_with_selection(line, col)
        definitions = find_keyword_definition(
            cp, TokenInfo(usage_info.stack, usage_info.node, usage_info.token)
        )
        if not definitions:
            return None
        definition: IKeywordDefinition = next(iter(definitions))
        return definition, usage_info