Example #1
    def get_ast(self):
        if not self._generate_ast:
            raise AssertionError(
                "The AST can only be accessed in the RobotFrameworkServerApi, not in the RobotFrameworkLanguageServer."
            )
        from robot.api import get_model, get_resource_model, get_init_model  # noqa

        try:
            source = self.source
        except:
            log.exception("Error getting source for: %s" % (self.uri,))
            source = ""

        try:
            t = self.get_type()
            if t == self.TYPE_TEST_CASE:
                return get_model(source)

            elif t == self.TYPE_RESOURCE:
                return get_resource_model(source)

            elif t == self.TYPE_INIT:
                return get_init_model(source)

            else:
                log.critical("Unrecognized section: %s", t)
                return get_model(source)
        except:
            log.critical(f"Error parsing {self.uri}")
            # Note: we always want to return a valid AST here (the
            # AST itself should have the error).
            model = get_model(f"*** Unable to parse: {self.uri} ***")
            return model
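The comment above relies on a useful property: get_model returns a model even for input it cannot fully parse, and the parse problems are stored as error tokens inside the tree. A minimal sketch of reading those errors back out, assuming Robot Framework 4.0+ where ModelVisitor and Token are exposed through robot.api.parsing (the CollectErrors class is illustrative, not part of the code above):

from robot.api import get_model
from robot.api.parsing import ModelVisitor, Token


class CollectErrors(ModelVisitor):
    """Gather the parser error messages embedded in a model."""

    def __init__(self):
        self.errors = []

    def visit_Error(self, node):
        # Error statements carry ERROR tokens whose `error` attribute
        # holds the parser message.
        for token in node.get_tokens(Token.ERROR):
            self.errors.append((token.lineno, token.error))


model = get_model("*** Unknown Section ***\n")
collector = CollectErrors()
collector.visit(model)
print(collector.errors)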
Example #2
    def get_ast(self):
        if not self._generate_ast:
            raise AssertionError(
                "The AST can only be accessed in the RobotFrameworkServerApi, not in the RobotFrameworkLanguageServer."
            )
        from robot.api import get_model, get_resource_model, get_init_model

        try:
            source = self.source
        except:
            log.exception("Error getting source for: %s" % (self.uri,))
            source = ""

        t = self.get_type()
        if t == self.TYPE_TEST_CASE:
            return get_model(source)

        elif t == self.TYPE_RESOURCE:
            return get_resource_model(source)

        elif t == self.TYPE_INIT:
            return get_init_model(source)

        else:
            log.critical("Unrecognized section: %s", t)
            return get_model(source)
Example #3
def execute(code: str,
            suite: TestSuite,
            defaults: TestDefaults = TestDefaults()):
    """Execute a snippet of code, given the current test suite."""
    # Compile AST
    model = get_model(
        StringIO(code),
        data_only=False,
        curdir=os.getcwd().replace("\\", "\\\\"),
    )
    ErrorReporter(code).visit(model)
    SettingsBuilder(suite, defaults).visit(model)
    SuiteBuilder(suite, defaults).visit(model)

    # Strip variables/keyword duplicates
    strip_duplicate_items(suite.resource.variables)
    strip_duplicate_items(suite.resource.keywords)

    # Execute suite
    stdout = StringIO()
    with TemporaryDirectory() as path:
        result = suite.run(outputdir=path, stdout=stdout)

    # Remove the tests run so far so that they are not run again
    # in the next execution
    clean_items(suite.tests)

    # Detect RPA
    suite.rpa = get_rpa_mode(model)

    return result
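For context, a hedged usage sketch of the execute helper above, assuming it lives in a module where TestSuite (from robot.api), TestDefaults and the builder/reporter classes are already imported as shown; the snippet string and suite name are illustrative:

import os
from robot.api import TestSuite

# Hypothetical call: run a single snippet against a fresh in-memory suite.
suite = TestSuite(name="Interactive", source=os.getcwd())
result = execute("*** Test Cases ***\nExample\n    Log    Hello\n", suite)
print(result.return_code)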
Example #4
    def _fix_file(self, file: str):
        model = get_model(file)

        if 'capitalize' in self.config.rules:
            KeywordCapitalizer(self.config.rules['capitalize']).visit(model)

        model.save()
Example #5
def run_check_on_string(in_memory, root='.'):
    config = robocop.Config(root=root)
    robocop_runner = robocop.Robocop(config=config)
    robocop_runner.reload_config()

    ast_model = get_model(in_memory)
    return robocop_runner.run_check(ast_model, r'C:\directory\file.robot',
                                    in_memory)
Example #6
    def visit_File(self, node):  # noqa
        if self.align_globally:
            model = get_model(node.source)
            column_widths = CollectColumnWidth(self.start_line, self.end_line)
            column_widths.visit(model)
            column_widths.align_to_tab_size()
            self.global_look_up = column_widths.columns
        self.generic_visit(node)
Example #7
def run_check_on_string(in_memory, root="."):
    config = robocop.Config(root=root)
    robocop_runner = robocop.Robocop(config=config)
    robocop_runner.reload_config()

    ast_model = get_model(in_memory)
    file_path = str(Path(Path.home(), "directory", "file.robot"))
    return robocop_runner.run_check(ast_model, file_path, in_memory)
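An illustrative call of the helper above; run_check returns issue objects whose desc attribute is also used in the later test examples:

# Hypothetical usage: lint an in-memory snippet and print the findings.
issues = run_check_on_string("*** Settings ***\n\n")
for issue in issues:
    print(issue.desc)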
Example #8
def build_suite(code: str,
                cell_history: Dict[str, str],
                data_only: bool = False):
    # Init
    suite = TestSuite(name="Jupyter", source=os.getcwd())
    defaults = TestDefaults(None)

    # Populate history
    for historical in cell_history.values():
        ast = get_model(
            StringIO(historical),
            data_only=data_only,
            curdir=os.getcwd().replace("\\", "\\\\"),
        )
        ErrorReporter(historical).visit(ast)
        SettingsBuilder(suite, defaults).visit(ast)
        SuiteBuilder(suite, defaults).visit(ast)

    # Clear historical tests
    suite.tests._items = []

    # Populate current
    ast = get_model(StringIO(code),
                    data_only=data_only,
                    curdir=os.getcwd().replace("\\", "\\\\"))
    ErrorReporter(code).visit(ast)
    SettingsBuilder(suite, defaults).visit(ast)
    SuiteBuilder(suite, defaults).visit(ast)

    # Strip duplicate keywords
    keywords = {}
    for keyword in suite.resource.keywords:
        keywords[keyword.name] = keyword
    suite.resource.keywords._items = list(keywords.values())

    # Strip duplicate variables
    variables = {}
    for variable in suite.resource.variables:
        variables[variable.name] = variable
    suite.resource.variables._items = list(variables.values())

    # Detect RPA
    suite.rpa = _get_rpa_mode(ast)

    return suite
Example #9
    def read(self, observer):
        if observer is not None:
            observer.msg("reading test suite %s" % (self.name,))

        self.model = get_model(source=self.path, data_only=False)
        self.read_resources(observer)
Example #10
def find_tests(reference, test_suite):

    model = get_model(reference)
    data = TestSuite.from_model(model)

    test_suite[data.name] = []
    for test_case in data.tests:
        test_suite[data.name].append({'test_name': test_case,
                                      'test_source': data.source})
    for child_data in data.suites:
        find_tests(child_data, test_suite)
    return test_suite
Example #11
    def test_run_check_in_memory(self):
        config = robocop.Config(root='.')

        robocop_runner = robocop.Robocop(config=config)
        robocop_runner.reload_config()
        in_memory = "*** Settings ***\n\n"
        ast_model = get_model(in_memory)
        issues = robocop_runner.run_check(ast_model,
                                          r'C:\directory\file.robot',
                                          in_memory)
        expected_issues = {
            'Missing documentation in suite', 'Section is empty'
        }
        assert all(issue.desc in expected_issues for issue in issues)
Example #12
def run_check_on_string(in_memory, include=None, configure=None):
    if include is None:
        include = set()
    if configure is None:
        configure = []
    config = Config(root=str(Path(__file__).parent))
    config.include = include
    config.configure = configure
    robocop_runner = Robocop(config=config)
    robocop_runner.reload_config()

    ast_model = get_model(in_memory)
    return robocop_runner.run_check(ast_model, r"C:\directory\file.robot",
                                    in_memory)
Example #13
    def test_ignore_sys_argv(self, monkeypatch):
        monkeypatch.setattr("sys.argv", ["robocorp", "--some", "args.robot"])
        config = robocop.Config()
        robocop_runner = robocop.Robocop(config=config)
        robocop_runner.reload_config()
        in_memory = "*** Settings ***\n\n"
        ast_model = get_model(in_memory)
        issues = robocop_runner.run_check(ast_model,
                                          r'C:\directory\file.robot',
                                          in_memory)
        expected_issues = {
            'Missing documentation in suite', 'Section is empty'
        }
        assert all(issue.desc in expected_issues for issue in issues)
Example #14
    def test_robocop_api_no_trailing_blank_line_message(self):
        """ Bug from #307 """
        source = "*** Test Cases ***\nTest\n    Fail\n    \nTest\n    Fail\n"
        ast = get_model(source)

        config = robocop.Config()
        robocop_runner = robocop.Robocop(config=config)
        robocop_runner.reload_config()

        issues = robocop_runner.run_check(ast, 'target.robot', source)
        diag_issues = issues_to_lsp_diagnostic(issues)
        assert all(
            d["message"] != "Missing trailing blank line at the end of file"
            for d in diag_issues)
Example #15
    def test_run_check_in_memory_with_config(self):
        config_path = Path(
            Path(__file__).parent.parent, 'test_data', 'api_config')
        config = robocop.Config(root=config_path)

        robocop_runner = robocop.Robocop(config=config)
        robocop_runner.reload_config()
        in_memory = "*** Settings ***\n\n"
        ast_model = get_model(in_memory)
        issues = robocop_runner.run_check(ast_model,
                                          r'C:\directory\file.robot',
                                          in_memory)
        issues_by_desc = [issue.desc for issue in issues]
        assert 'Missing documentation in suite' in issues_by_desc
        assert 'Section is empty' not in issues_by_desc
Example #16
    def test_run_check_in_memory(self):
        config = robocop.Config(root='.')

        robocop_runner = robocop.Robocop(config=config)
        robocop_runner.reload_config()
        in_memory = "*** Settings ***\n\n"
        ast_model = get_model(in_memory)
        issues = robocop_runner.run_check(ast_model,
                                          r'C:\directory\file.robot',
                                          in_memory)
        expected_issues = {
            'Missing documentation in suite', 'Section is empty',
            'Too many blank lines at the end of file'
        }
        actual_issues = {issue.desc for issue in issues}
        assert expected_issues == actual_issues
Example #17
def get_project_stats_3_2_2(source):
    """Project stats for RF 3.2.2 API"""
    proj_data = []
    for subdir, dirs, files in os.walk(source):
        for filename in files:

            filepath = subdir + os.sep + filename
            if filepath.endswith(".resource"):

                resource_model = get_resource_model(filepath)
                kw_section = [
                    section for section in resource_model.sections
                    if section.__class__.__name__ == 'KeywordSection'
                ]
                proj_data.append({
                    'Source': filepath,
                    'File Name': filename,
                    'Keywords': len(kw_section[0].body) if kw_section else 0,
                    'Test Cases': 0,
                })

            if filepath.endswith(".robot"):
                suite_model = get_model(filepath)
                kw_section = [
                    section for section in suite_model.sections
                    if section.__class__.__name__ == 'KeywordSection'
                ]
                test_section = [
                    section for section in suite_model.sections
                    if section.__class__.__name__ == 'TestCaseSection'
                ]
                proj_data.append({
                    'Source': filepath,
                    'File Name': filename,
                    'Keywords': len(kw_section[0].body) if kw_section else 0,
                    'Test Cases': len(test_section[0].body) if test_section else 0,
                })

    return proj_data
Example #18
def find_tests(reference, test_suite):

    model = get_model(reference)
    data = TestSuite.from_model(model)

    test_suite[data.name] = []
    # data.tests is a list
    for test_case in data.tests:  # pylint: disable=E1133
        test_suite[data.name].append({
            'test_name': test_case,
            'test_source': data.source
        })

    # data.suites is a list
    for child_data in data.suites:  # pylint: disable=E1133
        find_tests(child_data, test_suite)
    return test_suite
Example #19
    def recognize_file_types(self):
        """
        Pre-parse files to recognize their types. If the filename is `__init__.*`,
        the type is `INIT`. Files with the .resource extension are `RESOURCE` type.
        If the file is imported somewhere, its type is `RESOURCE`; otherwise it is
        `GENERAL`. These types matter because they determine the parsing class used
        with the Robot Framework API.
        """
        files = self.config.paths
        for file in self.get_files(files, self.config.recursive):
            if '__init__' in file.name:
                self.files[file] = FileType.INIT
            elif file.suffix.lower() == '.resource':
                self.files[file] = FileType.RESOURCE
            else:
                self.files[file] = FileType.GENERAL
        file_type_checker = FileTypeChecker(self.files, self.config.exec_dir)
        for file in self.files:
            file_type_checker.source = file
            model = get_model(file)
            file_type_checker.visit(model)
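Following the mapping described in the docstring, a small illustrative helper that picks the matching parser for each file type (parse_by_type is a hypothetical name; the FileType categories mirror the enum used above):

from pathlib import Path
from robot.api import get_model, get_resource_model, get_init_model


def parse_by_type(path):
    """Pick the Robot Framework parser that matches the file type."""
    path = Path(path)
    if path.stem == "__init__":             # FileType.INIT
        return get_init_model(path)
    if path.suffix.lower() == ".resource":  # FileType.RESOURCE
        return get_resource_model(path)
    return get_model(path)                  # FileType.GENERAL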
Example #20
def test_ast_print():
    from robotframework_interactive.ast_to_code import ast_to_code
    from robot.api import get_model

    code = ("*** Settings ***\n"
            "Library     lib    WITH NAME    foo\n"
            "\n"
            "*** Comments ***\n"
            "some comment\n"
            "\n"
            "*** Test Cases ***\n"
            "Test\n"
            "        [Documentation]        doc\n"
            "        [Tags]        sometag\n"
            "        Pass\n"
            "        Keyword\n"
            "        One More\n"
            "        Multiline    check1\n"
            "        ...          check2\n")
    model = get_model(code)
    assert ast_to_code(model) == code
Example #21
def run():
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--modes', required=True)
    parser.add_argument('-p', '--path', required=True)
    parser.add_argument('-l', '--line', default=0, type=int)
    parser.add_argument('-el', '--end-line', default=0, type=int)
    parser.add_argument('-i',
                        '--ignore',
                        help='List of keyword names to ignore')
    parser.add_argument('--indent', default=4, type=int)
    parser.add_argument('--separator', default=4, type=int)
    parser.add_argument('--align-globally', action='store_true')
    args = parser.parse_args()

    formatters = {
        'split': SplitToMultiline(args.line, args.end_line, args.separator),
        'rename': KeywordRenamer(args.ignore),
        'align': AlignSelected(args.line, args.end_line, args.indent,
                               args.align_globally),
        'tabs_to_spaces': TabsToSpaces(),
        'whitespace': WhitespaceCleanup()
    }
    modes = args.modes.split(',')
    model = get_model(args.path)
    for mode in modes:
        if mode not in formatters:
            print(f'Unrecognized mode: "{mode}". Skipping')
            continue
        formatters[mode].visit(model)

    model.save()
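The formatter classes used above (SplitToMultiline, TabsToSpaces, and so on) are not shown here; as a rough idea of the visit-then-save pattern, a minimal sketch of a tab-replacing pass under Robot Framework 4.0+, using robot.api.parsing.ModelVisitor (illustrative only, not the actual TabsToSpaces implementation):

from robot.api import get_model
from robot.api.parsing import ModelVisitor, Token


class TabsToSpacesSketch(ModelVisitor):
    """Replace tab characters in separator tokens with four spaces."""

    def visit_Statement(self, node):
        for token in node.tokens:
            if token.type == Token.SEPARATOR and "\t" in token.value:
                token.value = token.value.replace("\t", "    ")


model = get_model("example.robot")  # path is illustrative
TabsToSpacesSketch().visit(model)
model.save()  # writes the modified model back to its source file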
Example #22
    def _do_eval(self, debugger_impl):
        frame_id = self.frame_id
        stack_info = debugger_impl._get_stack_info_from_frame_id(frame_id)

        if stack_info is None:
            raise InvalidFrameIdError(
                "Unable to find frame id for evaluation: %s" % (frame_id, ))

        info = stack_info._frame_id_to_frame_info.get(frame_id)
        if info is None:
            raise InvalidFrameIdError(
                "Unable to find frame info for evaluation: %s" % (frame_id, ))

        if not isinstance(info, _KeywordFrameInfo):
            raise InvalidFrameTypeError(
                "Can only evaluate at a Keyword context (current context: %s)"
                % (info.get_type_name(), ))
        log.info("Doing evaluation in the Keyword context: %s", info.name)

        from robotframework_ls.impl.text_utilities import is_variable_text

        from robot.libraries.BuiltIn import BuiltIn  # type: ignore
        from robot.api import get_model  # type: ignore
        from robotframework_ls.impl import ast_utils

        # We can't really use
        # BuiltIn().evaluate(expression, modules, namespace)
        # because we can't set the variable_store used with it
        # (it always uses the latest).

        variable_store = info.variables.store

        if is_variable_text(self.expression):
            try:
                value = variable_store[self.expression[2:-1]]
            except Exception:
                pass
            else:
                return EvaluationResult(value)

        # Do we want this?
        # from robot.variables.evaluation import evaluate_expression
        # try:
        #     result = evaluate_expression(self.expression, variable_store)
        # except Exception:
        #     log.exception()
        # else:
        #     return EvaluationResult(result)

        # Try to check if it's a KeywordCall.
        s = """
*** Test Cases ***
Evaluation
    %s
""" % (self.expression, )
        model = get_model(s)
        usage_info = list(ast_utils.iter_keyword_usage_tokens(model))
        if len(usage_info) == 1:
            _stack, node, _token, name = next(iter(usage_info))

            dap_frames = stack_info.dap_frames
            if dap_frames:
                top_frame_id = dap_frames[0].id
                if top_frame_id != frame_id:
                    if get_log_level() >= 2:
                        log.debug(
                            "Unable to evaluate.\nFrame id for evaluation: %r\nTop frame id: %r.\nDAP frames:\n%s",
                            frame_id,
                            top_frame_id,
                            "\n".join(x.to_json() for x in dap_frames),
                        )

                    raise UnableToEvaluateError(
                        "Keyword calls may only be evaluated at the topmost frame."
                    )

                return EvaluationResult(BuiltIn().run_keyword(
                    name, *node.args))

        raise UnableToEvaluateError("Unable to evaluate: %s" %
                                    (self.expression, ))
    def test_from_model_with_custom_name(self):
        for source in [self.data, self.path]:
            model = api.get_model(source)
            suite = TestSuite.from_model(model, name='Custom name')
            self._verify_suite(suite, 'Custom name')
    def test_from_model_containing_source(self):
        model = api.get_model(self.path)
        suite = TestSuite.from_model(model)
        self._verify_suite(suite)
    def test_from_model(self):
        model = api.get_model(self.data)
        suite = TestSuite.from_model(model)
        self._verify_suite(suite, name='')
def _execute_impl(code: str,
                  suite: TestSuite,
                  defaults: TestDefaults = TestDefaults(),
                  stdout=None,
                  stderr=None,
                  listeners=[],
                  drivers=[],
                  outputdir=None,
                  interactive_keywords=True):
    # Clear selector completion highlights
    for driver in yield_current_connection(
            drivers, ["RPA.Browser", "selenium", "jupyter"]):
        try:
            clear_selector_highlights(driver)
        except BrokenOpenConnection:
            close_current_connection(drivers, driver)

    # Copy keywords/variables/libraries in case of failure
    imports = get_items_copy(suite.resource.imports)
    variables = get_items_copy(suite.resource.variables)
    keywords = get_items_copy(suite.resource.keywords)

    # Compile AST
    model = get_model(
        StringIO(code),
        data_only=False,
        curdir=os.getcwd().replace("\\", "\\\\"),
    )
    ErrorReporter(code).visit(model)
    SettingsBuilder(suite, defaults).visit(model)
    SuiteBuilder(suite, defaults).visit(model)

    # Strip variables/keyword duplicates
    strip_duplicate_items(suite.resource.variables)
    strip_duplicate_items(suite.resource.keywords)

    new_imports = [
        item for item in get_items_copy(suite.resource.imports)
        if item not in imports
    ]
    for new_import in new_imports:
        new_import.source = suite.source
    new_variables = [
        item for item in get_items_copy(suite.resource.variables)
        if item not in variables
    ]
    for new_variable in new_variables:
        new_variable.source = suite.source
    # If there is no test, allow the user to interact with defined keywords by providing widgets
    new_keywords = [
        item for item in get_items_copy(suite.resource.keywords)
        if item not in keywords
    ]
    for new_keyword in new_keywords:
        new_keyword.actual_source = suite.source
    if not suite.tests and new_keywords and interactive_keywords:
        return None, [
            get_interactive_keyword(suite, keyword) for keyword in new_keywords
        ]

    # Set default streams
    # By default stdout is a no-op
    # By default stderr raises an exception when flushing (a workaround, since
    # Robot Framework itself does not raise)
    if stdout is None:
        stdout = NoOpStream()
    if stderr is None:
        stderr = ErrorStream()

    # Execute suite
    try:
        result = suite.run(outputdir=outputdir,
                           stdout=stdout,
                           stderr=stderr,
                           listener=listeners)
    except TestSuiteError as e:
        # Reset keywords/variables/libraries
        set_items(suite.resource.imports, imports)
        set_items(suite.resource.variables, variables)
        set_items(suite.resource.keywords, keywords)

        clean_items(suite.tests)

        raise e

    for listener in listeners:
        if isinstance(listener, RobotKeywordsIndexerListener):
            listener.import_from_suite_data(suite)

    # Detect RPA
    suite.rpa = get_rpa_mode(model)

    report = None
    if suite.tests:
        report = generate_report(suite, outputdir)

    # Remove the tests run so far so that they are not run again
    # in the next execution
    clean_items(suite.tests)

    return result, report
def _execute_impl(code: str,
                  suite: TestSuite,
                  defaults: TestDefaults = TestDefaults(),
                  stdout=None,
                  stderr=None,
                  listeners=[],
                  drivers=[],
                  outputdir=None,
                  interactive_keywords=True,
                  logger=None):
    # This will help raise runtime exceptions
    traceback = []
    LOGGER.register_error_listener(
        lambda: traceback.extend(get_error_details()))

    # Clear selector completion highlights
    for driver in yield_current_connection(
            drivers, SeleniumConnectionsListener.NAMES + ["jupyter"]):
        try:
            clear_selector_highlights(driver)
        except BrokenOpenConnection:
            close_current_connection(drivers, driver)

    if logger is not None:
        logger.debug("Compiling code: \n%s", code)

    # Copy keywords/variables/libraries in case of failure
    imports = get_items_copy(suite.resource.imports)
    variables = get_items_copy(suite.resource.variables)
    keywords = get_items_copy(suite.resource.keywords)

    # Compile AST
    model = get_model(
        StringIO(code),
        data_only=False,
        curdir=os.getcwd().replace("\\", "\\\\"),
    )
    ErrorReporter(code).visit(model)
    SettingsBuilder(suite, defaults).visit(model)
    SuiteBuilder(suite, defaults).visit(model)

    # Strip variables/keyword duplicates
    strip_duplicate_items(suite.resource.variables)
    strip_duplicate_items(suite.resource.keywords)

    for listener in listeners:
        # Notify suite variables to the listener
        if isinstance(listener, GlobalVarsListener):
            listener.suite_vars = [
                var.name for var in suite.resource.variables
            ]

    new_imports = [
        item for item in get_items_copy(suite.resource.imports)
        if item not in imports
    ]
    for new_import in new_imports:
        new_import.source = suite.source
    new_variables = [
        item for item in get_items_copy(suite.resource.variables)
        if item not in variables
    ]
    for new_variable in new_variables:
        new_variable.source = suite.source
    # If there is no test, allow the user to interact with defined keywords by providing widgets
    new_keywords = [
        item for item in get_items_copy(suite.resource.keywords)
        if item not in keywords
    ]
    for new_keyword in new_keywords:
        new_keyword.actual_source = suite.source
    if not suite.tests and new_keywords and interactive_keywords:
        return None, [
            get_interactive_keyword(
                suite,
                keyword,
                # stdout=stdout, stderr=stderr,
                listeners=[
                    listener for listener in listeners
                    if not isinstance(listener, StatusEventListener)
                ],
                drivers=drivers,
                logger=logger) for keyword in new_keywords
        ]

    # Set default streams
    # By default stdout is no-op
    if stdout is None:
        stdout = NoOpStream()

    if logger is not None:
        logger.debug("Executing code")

    # Execute suite
    result = suite.run(outputdir=outputdir,
                       stdout=stdout,
                       stderr=stderr,
                       listener=listeners)

    if len(traceback) != 0:
        # Reset keywords/variables/libraries
        set_items(suite.resource.imports, imports)
        set_items(suite.resource.variables, variables)
        set_items(suite.resource.keywords, keywords)

        clean_items(suite.tests)

        error_msg = '\n'.join(traceback)

        if logger is not None:
            logger.debug("Execution error: %s", error_msg)

        raise TestSuiteError(error_msg)

    for listener in listeners:
        if isinstance(listener, RobotKeywordsIndexerListener):
            listener.import_from_suite_data(suite)

    # Detect RPA
    suite.rpa = get_rpa_mode(model)

    report = None
    if suite.tests:
        report = generate_report(suite, outputdir)

    # Remove the tests run so far so that they are not run again
    # in the next execution
    clean_items(suite.tests)

    return result, report
def detect_from_file(file):
    model = get_model(file)
    detector = AssignmentTypeDetector()
    detector.visit(model)
    return detector.keyword_most_common, detector.variables_most_common
from robot.api import get_model
import sys
import re

try:
    model = get_model(sys.argv[1])
except IndexError:
    model = get_model("test.robot")

setting_section = [
    x.body for x in model.sections
    if x.header.tokens[0].type == "SETTING_HEADER"
][0]

imported_libs = [x.tokens for x in setting_section]
imported_libs = [
    [token for token in tokens if token.type == "NAME"][0].value
    for tokens in imported_libs
]
imported_libs = [
    re.search(r"RobotLib\\\\(.*)\.py", x).group(1) for x in imported_libs
    if re.search(r"RobotLib\\\\(.*)\.py", x)
]

with open("requirements.txt", "w") as f:
    f.write("\n".join(imported_libs))
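The token-and-regex walk above is brittle (it assumes every Settings statement has a NAME token). Under Robot Framework 4.0+ the library names can also be collected with a ModelVisitor sketch like the following; visit_LibraryImport and node.name come from the parsing model, while the file name is illustrative:

from robot.api import get_model
from robot.api.parsing import ModelVisitor


class LibraryCollector(ModelVisitor):
    """Collect the names of all libraries imported in the Settings section."""

    def __init__(self):
        self.libraries = []

    def visit_LibraryImport(self, node):
        self.libraries.append(node.name)


model = get_model("test.robot")
collector = LibraryCollector()
collector.visit(model)
print(collector.libraries)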