def test_get_ast():
    from robotframework_ls.impl.robot_workspace import RobotDocument

    d = RobotDocument(uri="unkwown", source="*** Settings ***")
    ast = d.get_ast()
    assert ast is not None
    assert d.get_ast() is ast  # Check cache

    d.source = "*** Foobar"
    assert d.get_ast() is not ast


def test_print_ast():
    from robotframework_ls.impl.robot_workspace import RobotDocument
    from robotframework_ls.impl import ast_utils

    from io import StringIO

    doc = RobotDocument("unused", source="*** settings ***")
    s = StringIO()
    ast_utils.print_ast(doc.get_ast(), stream=s)
    assert [
        x.replace("SETTING HEADER", "SETTING_HEADER")
        for x in s.getvalue().splitlines()
    ] in (
        [
            "  File                                               (0, 0) -> (0, 16)",
            "    SettingSection                                   (0, 0) -> (0, 16)",
            "      SettingSectionHeader                           (0, 0) -> (0, 16)",
            "      - SETTING_HEADER, '*** settings ***'                  (0, 0->16)",
            "      - EOL, ''                                            (0, 16->16)",
        ],
        [  # version 4.0.4 onwards
            "  File                                               (0, 0) -> (0, 16)",
            "    SettingSection                                   (0, 0) -> (0, 16)",
            "      SectionHeader                                  (0, 0) -> (0, 16)",
            "      - SETTING_HEADER, '*** settings ***'                  (0, 0->16)",
            "      - EOL, ''                                            (0, 16->16)",
        ],
    )


def test_parse_errors(data_regression):
    from robotframework_ls.impl.robot_workspace import RobotDocument
    from robotframework_ls.impl.ast_utils import collect_errors

    source = """*** Settings ***
Documentation     A test suite with a single test for valid login.
...
...               This test has a workflow that is created using keywords in
...               the imported resource file.
Resource          resource.txt

"test"

*** Invalid Invalid Invalid ***
    Something

*** Test Cases ***
Valid Login
    Open Browser To Login Page
    Input Username    demo
    Input Password    mode
    Submit Credentials
    Welcome Page Should Be Open
    [Teardown]    Close Browser"""

    doc = RobotDocument("unsaved", source)
    errors = collect_errors(doc.get_ast())

    data_regression.check([e.to_dict() for e in errors], basename="errors")

    data_regression.check([e.to_lsp_diagnostic() for e in errors],
                          basename="lsp_diagnostic")

    assert repr(errors)  # Just check that it works.
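

def _example_lsp_diagnostic_shape():
    # Illustrative sketch, not part of the original test-suite: the dicts
    # produced by e.to_lsp_diagnostic() above are expected to follow the
    # standard LSP Diagnostic structure (field names come from the LSP spec;
    # the concrete values here are made up -- the real ones are checked
    # against the regression data files).
    return {
        "range": {
            "start": {"line": 7, "character": 0},
            "end": {"line": 7, "character": 6},
        },
        "severity": 1,  # 1 == Error in the LSP specification.
        "message": "Unexpected text in the Robot Framework source.",
    }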


def test_parse_errors_for(data_regression):
    from robotframework_ls.impl.robot_workspace import RobotDocument
    from robotframework_ls.impl.ast_utils import collect_errors
    from robotframework_ls.impl import ast_utils

    source = """
*** Test Cases ***
Invalid END
    FOR    ${var}    IN    one    two
        Fail    Not executed
"""

    doc = RobotDocument("unsaved", source)
    ast_utils.print_ast(doc.get_ast())
    errors = collect_errors(doc.get_ast())

    data_regression.check([e.to_dict() for e in errors], basename="errors_for")


def test_parse_errors_if(data_regression):
    from robotframework_ls.impl.robot_workspace import RobotDocument
    from robotframework_ls.impl.ast_utils import collect_errors
    from robotframework_ls.impl import ast_utils

    source = """
*** Test Cases ***
If without end
    IF  ${True}
       No Operation
"""

    doc = RobotDocument("unsaved", source)
    ast_utils.print_ast(doc.get_ast())
    errors = collect_errors(doc.get_ast())

    data_regression.check([e.to_dict() for e in errors], basename="errors_if")


def test_print_ast_data_regression(data_regression):
    from robotframework_ls.impl.robot_workspace import RobotDocument
    from robotframework_ls.impl import ast_utils

    from io import StringIO

    doc = RobotDocument("unused", source="*** settings ***")
    s = StringIO()
    ast_utils.print_ast(doc.get_ast(), stream=s)
    data_regression.check(s.getvalue().splitlines())


def test_iter_nodes():
    from robotframework_ls.impl import ast_utils
    from robotframework_ls.impl.robot_workspace import RobotDocument

    doc = RobotDocument(
        "unused", source="*** settings ***\nResource    my_resource.resource")
    lst = []
    for stack, node in ast_utils._iter_nodes(doc.get_ast()):
        lst.append("%s - %s" % ([s.__class__.__name__
                                 for s in stack], node.__class__.__name__))
    assert lst == [
        "[] - SettingSection",
        "['SettingSection'] - SettingSectionHeader",
        "['SettingSection'] - ResourceImport",
    ]
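

def _example_find_resource_imports():
    # Illustrative sketch, not part of the original test-suite: since
    # ast_utils._iter_nodes yields (stack, node) pairs (as exercised in
    # test_iter_nodes above), filtering on the node's class name is enough
    # to collect, e.g., every ResourceImport in a document.
    from robotframework_ls.impl import ast_utils
    from robotframework_ls.impl.robot_workspace import RobotDocument

    doc = RobotDocument(
        "unused", source="*** settings ***\nResource    my_resource.resource")
    return [
        node
        for _stack, node in ast_utils._iter_nodes(doc.get_ast())
        if node.__class__.__name__ == "ResourceImport"
    ]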


    # Note: method excerpt -- `self`, `log`, `IMonitor` and `typing.Optional`
    # are provided by the enclosing class/module in the original source.
    def threaded_semantic_tokens_from_code_full(
        self,
        prefix: str,
        full_code: str,
        indent: str,
        monitor: Optional[IMonitor] = None,
    ):
        from robotframework_ls.impl.semantic_tokens import semantic_tokens_full_from_ast

        try:
            from robotframework_ls.impl.robot_workspace import RobotDocument

            doc = RobotDocument("")
            doc.source = full_code
            ast = doc.get_ast()
            data = semantic_tokens_full_from_ast(ast, monitor)
            if not prefix:
                return {"resultId": None, "data": data}

            # We have to exclude the prefix from the coloring...

            # debug info...
            # import io
            # from robotframework_ls.impl.semantic_tokens import decode_semantic_tokens
            # stream = io.StringIO()
            # decode_semantic_tokens(data, doc, stream)
            # found = stream.getvalue()

            prefix_doc = RobotDocument("")
            prefix_doc.source = prefix
            last_line, last_col = prefix_doc.get_last_line_col()

            # Now we have the data from the full code, but we need to remove whatever
            # we have in the prefix from the result...
            ints_iter = iter(data)
            line = 0
            col = 0
            new_data = []
            indent_len = len(indent)
            while True:
                try:
                    line_delta = next(ints_iter)
                except StopIteration:
                    break
                col_delta = next(ints_iter)
                token_len = next(ints_iter)
                token_type = next(ints_iter)
                token_modifier = next(ints_iter)
                line += line_delta
                if line_delta == 0:
                    col += col_delta
                else:
                    col = col_delta

                if line >= last_line:
                    new_data.append(line - last_line)
                    new_data.append(col_delta - indent_len)
                    new_data.append(token_len)
                    new_data.append(token_type)
                    new_data.append(token_modifier)

                    # Ok, now, we have to add the indent_len to all the
                    # next lines
                    while True:
                        try:
                            line_delta = next(ints_iter)
                        except StopIteration:
                            break
                        col_delta = next(ints_iter)
                        token_len = next(ints_iter)
                        token_type = next(ints_iter)
                        token_modifier = next(ints_iter)

                        new_data.append(line_delta)
                        if line_delta > 0:
                            new_data.append(col_delta - indent_len)
                        else:
                            new_data.append(col_delta)
                        new_data.append(token_len)
                        new_data.append(token_type)
                        new_data.append(token_modifier)

                    break

                # Approach changed so that we always have a new line
                # i.e.:
                # \n<indent><code>
                #
                # so, the condition below no longer applies.
                # elif line == last_line and col >= last_col:
                #     new_data.append(0)
                #     new_data.append(col - last_col)
                #     new_data.append(token_len)
                #     new_data.append(token_type)
                #     new_data.append(token_modifier)
                #     new_data.extend(ints_iter)
                #     break

            # debug info...
            # temp_stream = io.StringIO()
            # temp_doc = RobotDocument("")
            # temp_doc.source = full_code[len(prefix) :]
            # decode_semantic_tokens(new_data, temp_doc, temp_stream)
            # temp_found = temp_stream.getvalue()

            return {"resultId": None, "data": new_data}
        except Exception:
            log.exception("Error computing semantic tokens from code.")
            return {"resultId": None, "data": []}