def test_print_ast(data_regression):
    from robotframework_ls.impl.robot_workspace import RobotDocument
    from robotframework_ls.impl import ast_utils

    from io import StringIO

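    # RobotDocument builds its AST on demand; get_ast() caches the result.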
    doc = RobotDocument("unused", source="*** settings ***")
    s = StringIO()
    ast_utils.print_ast(doc.get_ast(), stream=s)
    assert [
        # The token name differs across Robot Framework versions; normalize it.
        x.replace("SETTING HEADER", "SETTING_HEADER")
        for x in s.getvalue().splitlines()
    ] in (
        [  # before version 4.0.4
            "  File                                               (0, 0) -> (0, 16)",
            "    SettingSection                                   (0, 0) -> (0, 16)",
            "      SettingSectionHeader                           (0, 0) -> (0, 16)",
            "      - SETTING_HEADER, '*** settings ***'                  (0, 0->16)",
            "      - EOL, ''                                            (0, 16->16)",
        ],
        [  # version 4.0.4 onwards
            "  File                                               (0, 0) -> (0, 16)",
            "    SettingSection                                   (0, 0) -> (0, 16)",
            "      SectionHeader                                  (0, 0) -> (0, 16)",
            "      - SETTING_HEADER, '*** settings ***'                  (0, 0->16)",
            "      - EOL, ''                                            (0, 16->16)",
        ],
    )

def test_complete_from_context(rf_server_api, libspec_manager, tmpdir):
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.robot_workspace import RobotDocument
    from robotframework_ls.impl.robot_workspace import RobotWorkspace
    from robocorp_ls_core.watchdog_wrapper import create_observer

    api = rf_server_api

    text = """*** Task ***
Some task
    Log    Something     console=True
    Lo"""

    doc = RobotDocument("", text)

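    # Request completions at the very end of the document, right after the
    # partial "Lo" keyword.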
    line, col = doc.get_last_line_col()
    workspace = RobotWorkspace(
        str(tmpdir),
        fs_observer=create_observer("dummy", []),
        libspec_manager=libspec_manager,
    )
    completion_context = CompletionContext(doc, line, col, workspace=workspace)

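    # for/else: the else branch runs only if the loop never hits "break",
    # i.e. no completion labeled "Log" was found.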
    for completion in api._complete_from_completion_context(completion_context):
        if completion["label"] == "Log":
            break
    else:
        raise AssertionError('Did not find "Log" entry in completions.')

def test_parse_errors(data_regression):
    from robotframework_ls.impl.robot_workspace import RobotDocument
    from robotframework_ls.impl.ast_utils import collect_errors

    source = """*** Settings ***
Documentation     A test suite with a single test for valid login.
...
...               This test has a workflow that is created using keywords in
...               the imported resource file.
Resource          resource.txt

"test"

*** Invalid Invalid Invalid ***
    Something

*** Test Cases ***
Valid Login
    Open Browser To Login Page
    Input Username    demo
    Input Password    mode
    Submit Credentials
    Welcome Page Should Be Open
    [Teardown]    Close Browser"""

    doc = RobotDocument("unsaved", source)
    errors = collect_errors(doc.get_ast())

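    # data_regression (pytest-regressions) serializes the value and compares it
    # against a stored baseline file named by `basename`, failing on mismatch.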
    data_regression.check([e.to_dict() for e in errors], basename="errors")

    data_regression.check([e.to_lsp_diagnostic() for e in errors],
                          basename="lsp_diagnostic")

    assert repr(errors)  # Just check that it works.

def test_section_completions(data_regression):
    from robotframework_ls.impl import section_completions
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.robot_workspace import RobotDocument
    from robotframework_ls.config.config import Config

    config = Config(root_uri="", init_opts={}, process_id=-1, capabilities={})
    config.update(
        {"robot": {
            "completions": {
                "section_headers": {
                    "form": "both"
                }
            }
        }})

    doc = RobotDocument("unused", source="""**""")
    completions = section_completions.complete(
        CompletionContext(doc, config=config))
    data_regression.check(completions, basename="header_completions_all")

    doc = RobotDocument("unused", source="""**settin""")
    completions = section_completions.complete(
        CompletionContext(doc, config=config))
    data_regression.check(completions,
                          basename="header_completions_filter_settings")

    config.update({})  # Reset to defaults (plural section-header form).
    doc = RobotDocument("unused", source="""**""")
    completions = section_completions.complete(
        CompletionContext(doc, config=config))
    data_regression.check(completions,
                          basename="header_completions_all_plural")

def test_print_ast_data_regression(data_regression):
    from robotframework_ls.impl.robot_workspace import RobotDocument
    from robotframework_ls.impl import ast_utils

    from io import StringIO

    doc = RobotDocument("unused", source="*** settings ***")
    s = StringIO()
    ast_utils.print_ast(doc.get_ast(), stream=s)
    data_regression.check(s.getvalue().splitlines())

def test_iter_nodes():
    from robotframework_ls.impl import ast_utils
    from robotframework_ls.impl.robot_workspace import RobotDocument

    doc = RobotDocument(
        "unused", source="*** settings ***\nResource    my_resource.resource")
    lst = []
    for stack, node in ast_utils._iter_nodes(doc.get_ast()):
        lst.append("%s - %s" % ([s.__class__.__name__
                                 for s in stack], node.__class__.__name__))
    assert lst == [
        "[] - SettingSection",
        "['SettingSection'] - SettingSectionHeader",
        "['SettingSection'] - ResourceImport",
    ]

def test_parse_errors_for(data_regression):
    from robotframework_ls.impl.robot_workspace import RobotDocument
    from robotframework_ls.impl.ast_utils import collect_errors
    from robotframework_ls.impl import ast_utils

    source = """
*** Test Cases ***
Invalid END
    FOR    ${var}    IN    one    two
        Fail    Not executed
"""

    doc = RobotDocument("unsaved", source)
    ast_utils.print_ast(doc.get_ast())
    errors = collect_errors(doc.get_ast())

    data_regression.check([e.to_dict() for e in errors], basename="errors_for")

def test_parse_errors_if(data_regression):
    from robotframework_ls.impl.robot_workspace import RobotDocument
    from robotframework_ls.impl.ast_utils import collect_errors
    from robotframework_ls.impl import ast_utils

    source = """
*** Test Cases ***
If without end
    IF  ${True}
       No Operation
"""

    doc = RobotDocument("unsaved", source)
    ast_utils.print_ast(doc.get_ast())
    errors = collect_errors(doc.get_ast())

    data_regression.check([e.to_dict() for e in errors], basename="errors_if")

def test_snippets_completions(data_regression):
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.robot_workspace import RobotDocument
    from robotframework_ls.impl import snippets_completions

    doc = RobotDocument("unused", source="""for""")
    completions = snippets_completions.complete(CompletionContext(doc))

    data_regression.check(completions)

def test_section_name_keywords_completions(data_regression):
    from robotframework_ls.impl import section_name_completions
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.robot_workspace import RobotDocument

    doc = RobotDocument(
        "unused",
        source="""
*** keywords ***

""",
    )
    completions = section_name_completions.complete(CompletionContext(doc))
    data_regression.check(completions, basename="keywords_no_names")

    doc = RobotDocument(
        "unused",
        source="""
*** keywords ***
[""",
    )
    completions = section_name_completions.complete(CompletionContext(doc))
    data_regression.check(completions, basename="keywords_names")

    doc = RobotDocument(
        "unused",
        source="""
*** keywords ***
[Docum""",
    )
    completions = section_name_completions.complete(CompletionContext(doc))
    data_regression.check(completions, basename="keywords_docum_names")

    doc = RobotDocument(
        "unused",
        source="""
*** keywords ***
[Docum]""",
    )
    line, col = doc.get_last_line_col()
    completions = section_name_completions.complete(
        CompletionContext(doc, line=line, col=col - 1)
    )
    data_regression.check(completions, basename="keywords_docum_names2")

def test_section_name_settings_completions(data_regression):
    from robotframework_ls.impl import section_name_completions
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.robot_workspace import RobotDocument

    doc = RobotDocument(
        "unused",
        source="""
*** Settings ***

""",
    )
    completions = section_name_completions.complete(CompletionContext(doc))
    data_regression.check(completions, basename="settings_names")

    doc = RobotDocument(
        "unused",
        source="""
*** Settings ***

Docum""",
    )
    completions = section_name_completions.complete(CompletionContext(doc))
    data_regression.check(completions, basename="settings_docum_names")

    def threaded_monaco_completions_from_code_full(
        self,
        prefix: str,
        full_code: str,
        position: PositionTypedDict,
        uri: str,
        indent: str,
        monitor: Optional[IMonitor] = None,
    ):
        from robotframework_ls.impl.robot_workspace import RobotDocument
        from robotframework_ls.impl.completion_context import CompletionContext
        from robocorp_ls_core.workspace import Document
        from robotframework_ls.impl import section_completions
        from robotframework_ls.impl import snippets_completions
        from robotframework_ls.server_api.monaco_conversions import (
            convert_to_monaco_completion,
        )
        from robotframework_ls.impl.completion_context import CompletionType

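        # The incoming position is relative to the code the user typed; shift
        # the line by the prefix line count and the column by the injected indent.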
        d = Document(uri, prefix)
        last_line, _last_col = d.get_last_line_col()
        line = last_line + position["line"]

        col = position["character"]
        col += len(indent)

        document = RobotDocument(uri, full_code)
        completion_context = CompletionContext(
            document,
            line,
            col,
            config=self.config,
            monitor=monitor,
            workspace=self.workspace,
        )
        completion_context.type = CompletionType.shell
        completions = self._complete_from_completion_context(completion_context)
        completions.extend(section_completions.complete(completion_context))
        completions.extend(snippets_completions.complete(completion_context))

        return {
            "suggestions": [
                convert_to_monaco_completion(
                    c, line_delta=last_line, col_delta=len(indent), uri=uri
                )
                for c in completions
            ]
        }

def test_get_ast():
    from robotframework_ls.impl.robot_workspace import RobotDocument

    d = RobotDocument(uri="unknown", source="*** Settings ***")
    ast = d.get_ast()
    assert ast is not None
    assert d.get_ast() is ast  # Check cache

    d.source = "*** Foobar"
    assert d.get_ast() is not ast

def test_customize_interpreter_add_plugins_dir(
        language_server_io: ILanguageServerClient, workspace_dir: str,
        cases: CasesFixture):
    from robocorp_ls_core import uris
    import os
    from pathlib import Path
    from robotframework_ls.impl.robot_workspace import RobotDocument

    language_server = language_server_io

    cases.copy_to("custom_env", workspace_dir)

    language_server.initialize(workspace_dir, process_id=os.getpid())
    case1_robot: Path = Path(workspace_dir) / "env1" / "caselib1.robot"
    assert case1_robot.exists()
    uri_case1 = uris.from_fs_path(str(case1_robot))

    doc = RobotDocument(uri_case1)
    i_line = doc.find_line_with_contents("    verify lib1")

    language_server.open_doc(uri_case1, 1)

    ret = language_server.find_definitions(uri_case1, i_line, 6)
    result = ret["result"]
    assert not result

    # Now, customize it with the plugins.
    plugins_dir = cases.get_path("custom_env/plugins")

    add_plugins_result = language_server.execute_command(
        "robot.addPluginsDir", [plugins_dir])
    assert add_plugins_result["result"]

    ret = language_server.find_definitions(uri_case1, i_line, 6)
    result = ret["result"]
    assert result
    check = next(iter(result))
    assert check["uri"].endswith("lib1.py")

    # Check with another case
    case2_robot: Path = Path(workspace_dir) / "env2" / "caselib2.robot"
    assert case2_robot.exists()
    uri_case2 = uris.from_fs_path(str(case2_robot))
    doc = RobotDocument(uri_case2)
    i_line = doc.find_line_with_contents("    verify lib2")
    ret = language_server.find_definitions(uri_case2, i_line, 6)
    result = ret["result"]
    assert result
    check = next(iter(result))
    assert check["uri"].endswith("lib2.py")

    def threaded_semantic_tokens_from_code_full(
        self,
        prefix: str,
        full_code: str,
        indent: str,
        monitor: Optional[IMonitor] = None,
    ):
        from robotframework_ls.impl.semantic_tokens import semantic_tokens_full_from_ast

        try:
            from robotframework_ls.impl.robot_workspace import RobotDocument

            doc = RobotDocument("")
            doc.source = full_code
            ast = doc.get_ast()
            data = semantic_tokens_full_from_ast(ast, monitor)
            if not prefix:
                return {"resultId": None, "data": data}

            # We have to exclude the prefix from the coloring...

            # debug info...
            # import io
            # from robotframework_ls.impl.semantic_tokens import decode_semantic_tokens
            # stream = io.StringIO()
            # decode_semantic_tokens(data, doc, stream)
            # found = stream.getvalue()

            prefix_doc = RobotDocument("")
            prefix_doc.source = prefix
            last_line, last_col = prefix_doc.get_last_line_col()

            # Now we have the data from the full code, but we need to remove whatever
            # we have in the prefix from the result...
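            # LSP semantic tokens are a flat list of ints, 5 per token:
            # deltaLine, deltaStartChar, length, tokenType, tokenModifiers.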
            ints_iter = iter(data)
            line = 0
            col = 0
            new_data = []
            indent_len = len(indent)
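            # Consume tokens, tracking absolute line/col from the deltas;
            # tokens that fall inside the prefix are simply dropped.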
            while True:
                try:
                    line_delta = next(ints_iter)
                except StopIteration:
                    break
                col_delta = next(ints_iter)
                token_len = next(ints_iter)
                token_type = next(ints_iter)
                token_modifier = next(ints_iter)
                line += line_delta
                if line_delta == 0:
                    col += col_delta
                else:
                    col = col_delta

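                # First token at or past the prefix end: re-base its line on the
                # prefix end and strip the injected indent from its column.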
                if line >= last_line:
                    new_data.append(line - last_line)
                    new_data.append(col_delta - indent_len)
                    new_data.append(token_len)
                    new_data.append(token_type)
                    new_data.append(token_modifier)

                    # Ok, now, we have to add the indent_len to all the
                    # next lines
                    while True:
                        try:
                            line_delta = next(ints_iter)
                        except StopIteration:
                            break
                        col_delta = next(ints_iter)
                        token_len = next(ints_iter)
                        token_type = next(ints_iter)
                        token_modifier = next(ints_iter)

                        new_data.append(line_delta)
                        if line_delta > 0:
                            new_data.append(col_delta - indent_len)
                        else:
                            new_data.append(col_delta)
                        new_data.append(token_len)
                        new_data.append(token_type)
                        new_data.append(token_modifier)

                    break

                # Approach changed so that we always have a new line
                # i.e.:
                # \n<indent><code>
                #
                # so, the condition below no longer applies.
                # elif line == last_line and col >= last_col:
                #     new_data.append(0)
                #     new_data.append(col - last_col)
                #     new_data.append(token_len)
                #     new_data.append(token_type)
                #     new_data.append(token_modifier)
                #     new_data.extend(ints_iter)
                #     break

            # debug info...
            # temp_stream = io.StringIO()
            # temp_doc = RobotDocument("")
            # temp_doc.source = full_code[len(prefix) :]
            # decode_semantic_tokens(new_data, temp_doc, temp_stream)
            # temp_found = temp_stream.getvalue()

            return {"resultId": None, "data": new_data}
        except Exception:
            log.exception("Error computing semantic tokens from code.")
            return {"resultId": None, "data": []}