Code Example #1
from robot.api.parsing import Statement, Token  # Robot Framework 4+

def align_row(row, indent, look_up):
    # Start with the row's indentation, then pad every cell except the last
    # so each column is as wide as its widest entry (look_up) plus 4 spaces.
    aligned_row = [Token(Token.SEPARATOR, indent * ' ')]
    row_len = len(row)
    for i, c in enumerate(row):
        aligned_row.append(c)
        if i < row_len - 1:
            separator = Token(Token.SEPARATOR,
                              (look_up[i] - len(c.value) + 4) * ' ')
            aligned_row.append(separator)
    aligned_row.append(Token(Token.EOL, '\n'))
    return Statement.from_tokens(aligned_row)
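A minimal usage sketch, assuming Robot Framework 4+; the look_up widths below are hypothetical values for a two-column row:

row = [Token(Token.KEYWORD, 'Log'), Token(Token.ARGUMENT, 'message')]
statement = align_row(row, indent=4, look_up=[8, 7])
print(repr(''.join(t.value for t in statement.tokens)))
# '    Log         message\n' -- 4-space indent, first column padded to 8 + 4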
Code Example #2
def split_to_new_line(self, iterator, indent, not_split_first=False):
    iter_gen = iter(iterator)
    if not_split_first:
        # Keep the first element on the current line, after a separator.
        elem = next(iter_gen)
        yield Token(Token.SEPARATOR, self.separator * ' ')
        yield elem
    for elem in iter_gen:
        # Each remaining element starts a new continuation ('...') line.
        yield Token(Token.EOL, '\n')
        yield indent
        yield Token(Token.CONTINUATION, '...')
        yield Token(Token.SEPARATOR, self.separator * ' ')
        yield elem
    yield Token(Token.EOL, '\n')
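A minimal driving sketch, under the assumption that the owning class only needs an integer separator attribute; _Demo and the width 4 are hypothetical stand-ins:

from robot.api import Token

class _Demo:
    separator = 4
    split = split_to_new_line  # reuse the generator above as a method

indent = Token(Token.SEPARATOR, '    ')
args = [Token(Token.ARGUMENT, 'a'), Token(Token.ARGUMENT, 'b')]
for tok in _Demo().split(args, indent):
    print(tok.type, repr(tok.value))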
Code Example #3
def _strip_token_bdd_prefix(token):
    """
    This is a workaround because the parsing does not separate a BDD prefix
    from the keyword name. If the parsing is improved to do that separation
    in the future, we can stop doing this.
    
    :return Token:
        Returns a new token with the bdd prefix stripped or the original token passed.
    """
    from robotframework_ls.impl.robot_constants import BDD_PREFIXES
    from robot.api import Token

    if token is None:
        return token

    text = token.value.lower()
    for prefix in BDD_PREFIXES:
        if text.startswith(prefix):
            new_name = token.value[len(prefix) :]
            return Token(
                type=token.type,
                value=new_name,
                lineno=token.lineno,
                col_offset=token.col_offset + len(prefix),
                error=token.error,
            )
    return token
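A hedged usage sketch; it assumes BDD_PREFIXES holds lower-case prefixes such as 'given ' and 'when ' (the Gherkin prefixes Robot Framework supports):

from robot.api import Token

token = Token(Token.KEYWORD, 'Given the user logs in', lineno=2, col_offset=4)
stripped = _strip_token_bdd_prefix(token)
print(stripped.value)       # 'the user logs in'
print(stripped.col_offset)  # 10 -- shifted past the 'Given ' prefix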
Code Example #4
def _append_eol_to_prev_token(last_token, eol_token_contents):
    from robot.api import Token

    new_value = last_token.value + eol_token_contents

    return Token(
        type=last_token.type,
        value=new_value,
        lineno=last_token.lineno,
        col_offset=last_token.col_offset,
        error=last_token.error,
    )
Code Example #5
File: test_tokens.py  Project: xylix/robotframework
def test_string_repr(self):
    for token, exp_str, exp_repr in [
        ((Token.ELSE_IF, 'ELSE IF', 6, 4), 'ELSE IF',
         "Token(ELSE_IF, 'ELSE IF', 6, 4)"),
        ((Token.KEYWORD, u'Hyv\xe4', 6, 4), u'Hyv\xe4',
         u"Token(KEYWORD, %r, 6, 4)" % u'Hyv\xe4'),
        ((Token.ERROR, 'bad value', 6, 4, 'The error.'), 'bad value',
         "Token(ERROR, 'bad value', 6, 4, 'The error.')"),
        ((), '', "Token(None, '', -1, -1)")
    ]:
        token = Token(*token)
        assert_equal(str(token), exp_str)
        assert_equal(repr(token), exp_repr)
Code Example #6
    def visit_File(self, node):  # noqa
        self.generic_visit(node)
        if node.sections and node.sections[-1].body:
            self.trim_trailing_empty_lines(node.sections[-1])
            self.trim_trailing_empty_lines(node.sections[-1].body[-1])
            if not self.is_keyword_or_tests_section(node.sections[-1]):
                last = node.sections[-1].body[-1]
                last.tokens = last.tokens[:-1] + (Token(Token.EOL, '\n'),)

        node.sections = [
            section for section in node.sections
            if not self.only_empty_lines(section)
        ]
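A sketch of how a visitor like this is typically applied, assuming the owning class subclasses robot.api.parsing.ModelTransformer (as robotframework-tidy transformers do); MyTransformer is a hypothetical name for it:

from robot.api import get_model

model = get_model('suite.robot')  # parse a file into an AST
MyTransformer().visit(model)      # runs visit_File on the parsed model
model.save()                      # write the normalized source back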
Code Example #7
    def test_string_repr(self):
        # Python 2 variant of the test in Code Example #5: it asserts
        # unicode(token) and an older repr() format that quotes the type.
        for token, exp_str, exp_repr in [
            ((Token.ELSE_IF, 'ELSE IF', 6, 4), 'ELSE IF',
             "Token('ELSE IF', 'ELSE IF', 6, 4)"),
            ((Token.KEYWORD, u'Hyv\xe4', 6, 4), u'Hyv\xe4',
             u"Token('KEYWORD', %r, 6, 4)" % u'Hyv\xe4'),
            ((Token.ERROR, 'bad value', 6, 4, 'The error.'), 'bad value',
             "Token('ERROR', 'bad value', 6, 4, 'The error.')")
        ]:
            token = Token(*token)
            assert_equal(unicode(token), exp_str)
            assert_equal(repr(token), exp_repr)
Code Example #8
def iter_variable_assigns(ast):
    from robot.api import Token

    for stack, node in _iter_nodes(ast, recursive=False):
        if node.__class__.__name__ == "KeywordCall":
            for token in node.get_tokens(Token.ASSIGN):
                value = token.value
                i = value.rfind("}")
                if i > 0:
                    # Trim anything after the closing '}', e.g. an ' =' assign sign.
                    new_value = value[: i + 1]
                    token = Token(
                        type=token.type,
                        value=new_value,
                        lineno=token.lineno,
                        col_offset=token.col_offset,
                        error=token.error,
                    )

                yield _TokenInfo(tuple(stack), node, token)
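A hedged usage sketch; _iter_nodes and _TokenInfo are internal helpers not shown here, and the .token attribute on the yielded tuples is an assumption:

from robot.api import get_model

model = get_model('suite.robot')
for info in iter_variable_assigns(model):
    # ASSIGN values such as '${result} =' are yielded trimmed to '${result}'.
    print(info.token.value)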
Code Example #9
def _tokenize_variables_even_when_invalid(token, col):
    """
    If Token.tokenize_variables() fails, this can still provide the variable
    under the given column by applying some heuristics to find open variables.
    """
    try:
        return token.tokenize_variables()
    except Exception:
        pass

    # If we got here, it means that we weren't able to tokenize the variables
    # properly (probably some variable wasn't closed properly), so, let's do
    # a custom implementation for this use case.

    from robot.api import Token
    from robotframework_ls.impl.robot_constants import VARIABLE_PREFIXES

    diff = col - token.col_offset
    up_to_cursor = token.value[:diff]
    open_at = up_to_cursor.rfind("{")

    if open_at >= 1:
        if up_to_cursor[open_at - 1] in VARIABLE_PREFIXES:
            varname = [up_to_cursor[open_at - 1 :]]
            from_cursor = token.value[diff:]

            for c in from_cursor:
                if c in VARIABLE_PREFIXES or c.isspace() or c == "{":
                    break
                if c == "}":
                    varname.append(c)
                    break
                varname.append(c)

            return [
                Token(
                    type=token.VARIABLE,
                    value="".join(varname),
                    lineno=token.lineno,
                    col_offset=token.col_offset + open_at - 1,
                    error=token.error,
                )
            ]
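A usage sketch, assuming robotframework-ls is importable and VARIABLE_PREFIXES holds the variable sigils ('$', '@', '&', '%'); column 9 points inside the unclosed '${var', which tokenize_variables() alone would reject:

from robot.api import Token

token = Token(Token.NAME, 'Log    ${var', lineno=1, col_offset=0)
for var in _tokenize_variables_even_when_invalid(token, col=9):
    print(var.type, var.value)  # VARIABLE ${var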
Code Example #10
def matches_robot_keyword(keyword_name_call_text, keyword_name, _re_cache={}):
    """
    Checks if a given text matches a given keyword. 
    
    Note: both should be already normalized.
    Note: should NOT be called if keyword does not have '{' in it.
    
    :param keyword_name_call_text:
        The call that has resolved variables.
        
    :param keyword_name:
        The keyword (which has variables -- i.e.: '{').
    """

    try:
        compiled = _re_cache[keyword_name]
    except KeyError:
        from robotframework_ls.impl import ast_utils
        from robot.api import Token
        import re

        regexp = []
        for t in ast_utils.tokenize_variables(
                Token(Token.KEYWORD_NAME, keyword_name)):
            if t.type == t.VARIABLE:
                regexp.append("(.*)")
            else:
                regexp.append(re.escape(t.value))

        regexp.append("$")

        regexp = "".join(regexp)
        _re_cache[keyword_name] = re.compile(regexp)
        compiled = _re_cache[keyword_name]

    return compiled.match(keyword_name_call_text)
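A hedged usage sketch; both strings are assumed to be pre-normalized as the docstring requires (whatever normalization the caller applies must match on both sides), and each embedded variable becomes a capturing group:

match = matches_robot_keyword('login as admin', 'login as ${user}')
if match:
    print(match.group(1))  # 'admin'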
Code Example #11
def test_types_that_cannot_contain_variables(self):
    for token_type in [Token.VARIABLE, Token.KEYWORD, Token.SEPARATOR]:
        token = Token(token_type, 'Hello, ${var}!', 1, 0)
        assert_equal(list(token.tokenize_variables()), [token])
Code Example #12
def test_types_that_can_contain_variables(self):
    for token_type in [
            Token.NAME, Token.ARGUMENT, Token.TESTCASE_NAME,
            Token.KEYWORD_NAME
    ]:
        token = Token(token_type, 'Nothing to see hear!')
        assert_equal(list(token.tokenize_variables()), [token])
        token = Token(token_type, '${var only}')
        assert_equal(list(token.tokenize_variables()),
                     [Token(Token.VARIABLE, '${var only}')])
        token = Token(token_type, 'Hello, ${var}!', 1, 0)
        assert_equal(list(token.tokenize_variables()), [
            Token(token_type, 'Hello, ', 1, 0),
            Token(Token.VARIABLE, '${var}', 1, 7),
            Token(token_type, '!', 1, 13)
        ])
Code Example #13
def test_automatic_value(self):
    for typ, value in [(Token.IF, 'IF'), (Token.ELSE_IF, 'ELSE IF'),
                       (Token.ELSE, 'ELSE'), (Token.FOR, 'FOR'),
                       (Token.END, 'END'), (Token.CONTINUATION, '...'),
                       (Token.EOL, '\n'), (Token.WITH_NAME, 'WITH NAME')]:
        assert_equal(Token(typ).value, value)
Code Example #14
def __init__(self):
    self.header_end_lines = 2
    self.test_case_sep = 2
    self.empty_line = Statement.from_tokens([Token(Token.EOL, '\n')])
Code Example #15
def insert_seperator(self, iterator):
    for elem in iterator:
        yield elem
        yield Token(Token.SEPARATOR, self.separator * ' ')
Code Example #16
def test_tokenize_variables_is_generator(self):
    variables = Token(Token.NAME, 'Hello, ${var}!').tokenize_variables()
    assert_false(isinstance(variables, list))
    assert_equal(iter(variables), variables)
Code Example #17
def create_token(name):
    from robot.api import Token

    return Token(Token.NAME, name)
Code Example #18
    def _evaluate(self, code: str) -> ActionResultDict:
        # Compile AST
        from io import StringIO
        from robot.api import Token

        facade = RobotFrameworkFacade()
        get_model = facade.get_model
        TestSuite = facade.TestSuite
        TestDefaults = facade.TestDefaults
        SettingsBuilder = facade.SettingsBuilder
        EXECUTION_CONTEXTS = facade.EXECUTION_CONTEXTS
        SuiteBuilder = facade.SuiteBuilder

        code = self.compute_evaluate_text(code)["full_code"]

        model = get_model(
            StringIO(code),
            data_only=False,
            curdir=os.path.abspath(os.getcwd()).replace("\\", "\\\\"),
        )

        if not model.sections:
            msg = "Unable to interpret: no sections found."
            self.on_stderr(msg)
            return {
                "success": False,
                "message": f"Error while evaluating: {msg}",
                "result": None,
            }

        # Raise an error if there's anything wrong in the model that was parsed.
        _CustomErrorReporter(code).visit(model)

        # Initially this was engineered so that typing *** Settings *** would
        # enter *** Settings *** mode, but that idea was abandoned (its
        # implementation is still here as we may want to revisit it, but it
        # has issues in how to compute the full doc for code completion, so
        # the default section is always a test-case section now).
        #
        # last_section = model.sections[-1]
        # last_section_name = last_section.__class__.__name__
        # last_section_name = "TestCaseSection"
        # block_mode = self._settings_section_name_to_block_mode.get(last_section_name)
        # if block_mode is None:
        #     self.on_stderr(f"Unable to find block mode for: {last_section_name}")
        #
        # else:
        #     self._last_block_mode_and_indent = block_mode
        #     self._last_section_name = last_section_name

        new_suite = TestSuite(name="Default test suite")
        defaults = TestDefaults()

        SettingsBuilder(new_suite, defaults).visit(model)
        SuiteBuilder(new_suite, defaults).visit(model)

        # ---------------------- handle what was loaded in the settings builder.
        current_context = EXECUTION_CONTEXTS.current
        namespace = current_context.namespace
        source = os.path.join(
            os.path.abspath(os.getcwd()), "in_memory_interpreter.robot"
        )
        for new_import in new_suite.resource.imports:
            new_import.source = source
            # Actually do the import (library, resource, variable)
            namespace._import(new_import)

        if new_suite.resource.variables:
            # Handle variables defined in the current test.
            for variable in new_suite.resource.variables:
                variable.source = source

            namespace.variables.set_from_variable_table(new_suite.resource.variables)

        if new_suite.resource.keywords:
            # It'd be really nice to have a better API for this...
            user_keywords = namespace._kw_store.user_keywords
            for kw in new_suite.resource.keywords:
                kw.actual_source = source
                handler = user_keywords._create_handler(kw)

                embedded = isinstance(handler, facade.EmbeddedArgumentsHandler)
                user_keywords.handlers.add(handler, embedded)

        # --------------------------------------- Actually run any test content.
        for test in new_suite.tests:
            context = EXECUTION_CONTEXTS.current
            facade.run_test_body(context, test)

        # Now, update our representation of the document to include what the
        # user just entered.
        for section in model.sections:
            section_name = section.__class__.__name__
            if section.body:
                if section_name not in self._doc_parts:
                    continue

                current = self._doc_parts[section_name]
                if not current:
                    add = True
                    if section.__class__.__name__ == "TestCaseSection" and (
                        not section.body
                        or (len(section.body) == 1 and not section.body[0].body)
                    ):
                        add = False
                    if add:
                        current = self._doc_parts[section_name] = section
                else:
                    if current.__class__.__name__ == "TestCaseSection":
                        current = current.body[-1]
                        for test_case in section.body:
                            current.body.extend(test_case.body)
                    else:
                        current.body.extend(section.body)

                if current is not None:
                    # Make sure that there is a '\n' as the last EOL.
                    last_in_body = current.body[-1]
                    while not hasattr(last_in_body, "tokens"):
                        last_in_body = last_in_body.body[-1]
                    tokens = last_in_body.tokens
                    last_token = tokens[-1]
                    found_new_line = False
                    if last_token.type == Token.EOL:
                        if not last_token.value:
                            last_token.value = "\n"
                            found_new_line = True
                    if not found_new_line:
                        last_in_body.tokens += (Token("EOL", "\n"),)

        return {"success": True, "message": None, "result": None}