def test_get_ast():
    from robotframework_ls.impl.robot_workspace import RobotDocument

    d = RobotDocument(uri="unkwown", source="*** Settings ***")
    ast = d.get_ast()
    assert ast is not None
    assert d.get_ast() is ast  # Check cache

    d.source = "*** Foobar"
    assert d.get_ast() is not ast
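
The test relies on RobotDocument parsing lazily and caching the AST until source is reassigned. A minimal sketch of that pattern, using a hypothetical _CachingDocument with a stand-in parser (the real class delegates to the Robot Framework parser and carries more state):

class _CachingDocument:
    def __init__(self, uri, source):
        self.uri = uri
        self._source = source
        self._ast = None  # Parsed lazily; dropped whenever source changes.

    @property
    def source(self):
        return self._source

    @source.setter
    def source(self, new_source):
        self._source = new_source
        self._ast = None  # Invalidate: the cached AST no longer matches.

    def get_ast(self):
        if self._ast is None:
            self._ast = self._parse(self._source)
        return self._ast

    def _parse(self, source):
        # Stand-in for the real Robot Framework parser call.
        return ("ast", source)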
Example #2
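    # Assumed module-level imports, not shown in this excerpt:
    #   from typing import Optional
    #   from robocorp_ls_core.protocols import IMonitor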
    def threaded_semantic_tokens_from_code_full(
        self,
        prefix: str,
        full_code: str,
        indent: str,
        monitor: Optional[IMonitor] = None,
    ):
        from robotframework_ls.impl.semantic_tokens import semantic_tokens_full_from_ast

        try:
            from robotframework_ls.impl.robot_workspace import RobotDocument

            doc = RobotDocument("")
            doc.source = full_code
            ast = doc.get_ast()
            data = semantic_tokens_full_from_ast(ast, monitor)
            if not prefix:
                return {"resultId": None, "data": data}

            # We have to exclude the prefix from the coloring...

            # debug info...
            # import io
            # from robotframework_ls.impl.semantic_tokens import decode_semantic_tokens
            # stream = io.StringIO()
            # decode_semantic_tokens(data, doc, stream)
            # found = stream.getvalue()

            prefix_doc = RobotDocument("")
            prefix_doc.source = prefix
            last_line, last_col = prefix_doc.get_last_line_col()

            # Now we have the data from the full code, but we need to remove whatever
            # we have in the prefix from the result...
            ints_iter = iter(data)
            line = 0
            col = 0
            new_data = []
            indent_len = len(indent)
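            # Each token occupies five consecutive ints in `data`:
            # (line_delta, col_delta, token_len, token_type, token_modifier).
            # Lines and columns are delta-encoded against the previous
            # token; a column delta is absolute whenever the line changes.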
            while True:
                try:
                    line_delta = next(ints_iter)
                except StopIteration:
                    break
                col_delta = next(ints_iter)
                token_len = next(ints_iter)
                token_type = next(ints_iter)
                token_modifier = next(ints_iter)
                line += line_delta
                if line_delta == 0:
                    col += col_delta
                else:
                    col = col_delta

                if line >= last_line:
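                    # First token at or past the prefix end. The line just
                    # changed (otherwise we'd still be before last_line), so
                    # col_delta is an absolute column: rebase the line to
                    # drop the prefix lines and discount the artificial
                    # indent from the column.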
                    new_data.append(line - last_line)
                    new_data.append(col_delta - indent_len)
                    new_data.append(token_len)
                    new_data.append(token_type)
                    new_data.append(token_modifier)

                    # Ok, now we have to discount the indent_len from the
                    # first column of every following line.
                    while True:
                        try:
                            line_delta = next(ints_iter)
                        except StopIteration:
                            break
                        col_delta = next(ints_iter)
                        token_len = next(ints_iter)
                        token_type = next(ints_iter)
                        token_modifier = next(ints_iter)

                        new_data.append(line_delta)
                        if line_delta > 0:
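                            # line_delta > 0: col_delta is an absolute column
                            # that still includes the artificial indent, so
                            # discount it. Mid-line deltas (else branch) are
                            # relative and need no adjustment.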
                            new_data.append(col_delta - indent_len)
                        else:
                            new_data.append(col_delta)
                        new_data.append(token_len)
                        new_data.append(token_type)
                        new_data.append(token_modifier)

                    break

                # The approach changed so that there's always a newline
                # before the code, i.e.:
                # \n<indent><code>
                #
                # so the condition below no longer applies.
                # elif line == last_line and col >= last_col:
                #     new_data.append(0)
                #     new_data.append(col - last_col)
                #     new_data.append(token_len)
                #     new_data.append(token_type)
                #     new_data.append(token_modifier)
                #     new_data.extend(ints_iter)
                #     break

            # debug info...
            # temp_stream = io.StringIO()
            # temp_doc = RobotDocument("")
            # temp_doc.source = full_code[len(prefix) :]
            # decode_semantic_tokens(new_data, temp_doc, temp_stream)
            # temp_found = temp_stream.getvalue()

            return {"resultId": None, "data": new_data}
        except Exception:
            log.exception("Error computing semantic tokens from code.")
            return {"resultId": None, "data": []}