def test_matches_robot_keyword():
    """Exercise matches_robot_keyword for embedded-argument and plain names."""
    from robotframework_ls.impl.text_utilities import matches_robot_keyword
    from robotframework_ls.impl.text_utilities import normalize_robot_name

    def check(call_text, keyword_name):
        # Both sides are normalized before matching, mirroring production use.
        return matches_robot_keyword(
            normalize_robot_name(call_text), normalize_robot_name(keyword_name)
        )

    embedded = 'I execute "${cmd:[^"]+}"'
    assert check('I execute "ls"', embedded)

    # Trailing text after the embedded argument must not match.
    assert not check('I execute "ls" f', embedded)

    # Leading text before the keyword must not match either.
    assert not check('f I execute "ls"', embedded)

    # Should work on the regular case too.
    assert check("rar{a", "rar{a")
    assert check("rara", "rara")
def on_keyword(self, keyword_found):
    """
    :param IKeywordFound keyword_found:
    """
    from robotframework_ls.impl.text_utilities import normalize_robot_name

    # Every keyword is always indexed in the flat (unqualified) container.
    self._keywords_container.add_keyword(keyword_found)

    library_name = keyword_found.library_name
    library_alias = keyword_found.library_alias
    resource_name = keyword_found.resource_name

    if library_name:
        # An alias (WITH NAME) takes precedence over the real library name.
        name = normalize_robot_name(library_alias if library_alias else library_name)
        dct = self._library_name_to_keywords_container
    elif resource_name:
        name = normalize_robot_name(resource_name)
        dct = self._resource_name_to_keywords_container
    else:
        log.info("No library name nor resource name for keyword: %s" % (keyword_found.name, ))
        return

    # Lazily create the per-library/per-resource container.
    keyword_container = dct.setdefault(name, _KeywordContainer())
    keyword_container.add_keyword(keyword_found)
def get_builtin_normalized_names() -> FrozenSet[str]:
    """Return the normalized names of all builtin Robot variables.

    :return: frozenset with each builtin variable name passed through
        normalize_robot_name (suitable for case/format-insensitive lookups).
    """
    from robotframework_ls.impl.text_utilities import normalize_robot_name

    # Idiomatic: build the frozenset directly from a generator instead of
    # appending to a temporary list first. Only the name (first tuple item)
    # of each BUILTIN_VARIABLES entry is relevant here.
    return frozenset(normalize_robot_name(k) for k, _ in BUILTIN_VARIABLES)
def _accept(self, k: str) -> bool:
    """Return True if *k* does not shadow a builtin variable name.

    :param k: a variable name; compared after normalization.
    :return: False when the normalized name is in self._builtins, else True.
    """
    from robotframework_ls.impl.text_utilities import normalize_robot_name

    # Idiomatic: return the boolean expression directly instead of an
    # if/else returning literal True/False.
    return normalize_robot_name(k) not in self._builtins
def collect_analysis_errors(completion_context):
    """Collect 'Undefined keyword' errors for keyword usages in the AST."""
    from robotframework_ls.impl import ast_utils
    from robotframework_ls.impl.ast_utils import create_error_from_node
    from robotframework_ls.impl.collect_keywords import collect_keywords
    from robotframework_ls.impl.text_utilities import normalize_robot_name

    collector = _KeywordsCollector()
    collect_keywords(completion_context, collector)

    errors = []
    ast = completion_context.get_ast()
    for usage in ast_utils.iter_keyword_usage_tokens(ast):
        completion_context.check_cancelled()

        if collector.contains_keyword(normalize_robot_name(usage.name)):
            continue

        # There's not a direct match, but the library name may be builtin
        # into the keyword name, so, check if we have a match that way.
        errors.append(
            create_error_from_node(
                usage.node,
                "Undefined keyword: %s." % (usage.name, ),
                tokens=[usage.token],
            )
        )
        if len(errors) >= MAX_ERRORS:
            # i.e.: Collect at most 100 errors
            break
    return errors
def _strip_token_bdd_prefix(token):
    """
    This is a workaround because the parsing does not separate a BDD prefix
    from the keyword name. If the parsing is improved to do that separation
    in the future we can stop doing this.

    :return Token:
        Returns a new token with the bdd prefix stripped or the original
        token passed.
    """
    from robotframework_ls.impl.robot_constants import BDD_PREFIXES
    from robot.api import Token
    from robotframework_ls.impl.text_utilities import normalize_robot_name

    # Match against the normalized name so casing/formatting differences
    # don't hide a BDD prefix ("Given", "when", ...).
    text = normalize_robot_name(token.value)
    for prefix in BDD_PREFIXES:
        if text.startswith(prefix):
            # NOTE(review): the prefix was matched against the *normalized*
            # text, but len(prefix) is used to slice/offset the original
            # token.value. This assumes normalization preserves character
            # count over the prefix span (e.g. only lowercases) — confirm
            # against normalize_robot_name's implementation.
            new_name = token.value[len(prefix):]
            # Build a fresh token shifted past the prefix; the original
            # token is left untouched.
            return Token(
                type=token.type,
                value=new_name,
                lineno=token.lineno,
                col_offset=token.col_offset + len(prefix),
                error=token.error,
            )
    # No BDD prefix found: hand back the original token unchanged.
    return token
def add_keyword(self, keyword_found):
    """Index *keyword_found* in this container under its normalized name."""
    from robotframework_ls.impl.text_utilities import normalize_robot_name

    key = normalize_robot_name(keyword_found.keyword_name)
    self._name_to_keyword[key] = keyword_found

    # Names containing '{' may carry embedded arguments and need a
    # separate (regex-based) matching path, so track them on the side.
    if "{" in key:
        self._names_with_variables.add(key)
def is_keyword_name_match(self, keyword_name):
    """Check whether the stored filter matches *keyword_name*.

    Exact match on the normalized name wins; otherwise names containing
    '{' (possible embedded arguments) go through matches_robot_keyword.
    """
    normalized = normalize_robot_name(keyword_name)
    if self.filter_text == normalized:
        return True
    # Short-circuit: non-embedded names that didn't match exactly fail.
    return "{" in normalized and matches_robot_keyword(self.filter_text, normalized)
def collect_analysis_errors(completion_context: ICompletionContext):
    """Collect keyword-usage errors (undefined / ambiguously defined keywords)."""
    collector = _KeywordsCollector()
    collect_keywords(completion_context, collector)

    errors = []
    ast = completion_context.get_ast()
    for usage in ast_utils.iter_keyword_usage_tokens(ast):
        completion_context.check_cancelled()
        normalized = normalize_robot_name(usage.name)

        if not collector.contains_keyword(normalized):
            # There's not a direct match, but the library name may be builtin
            # into the keyword name, so, check if we have a match that way.
            errors.append(
                create_error_from_node(
                    usage.node,
                    "Undefined keyword: %s." % (usage.name, ),
                    tokens=[usage.token],
                )
            )
        else:
            multi = collector.check_multiple_keyword_definitions(normalized)
            if multi is not None:
                # Ambiguous: list every qualified definition so the user can
                # disambiguate with the full name.
                qualified = "\n".join(
                    f"    {m.library_alias}.{m.keyword_name}" for m in multi
                )
                errors.append(
                    create_error_from_node(
                        usage.node,
                        "Multiple keywords with name '%s' found. Give the full name of the keyword you want to use:\n%s"
                        % (usage.name, qualified),
                        tokens=[usage.token],
                    )
                )

        if len(errors) >= MAX_ERRORS:
            # i.e.: Collect at most 100 errors
            break

    errors.extend(CodeAnalysisVisitor.find_from(completion_context))
    return errors
def add_keyword(self, keyword_found):
    # Index keyword_found by normalized name; when a different definition
    # already exists under the same name, record both in
    # self._multiple_keywords so usages can be flagged as ambiguous.
    from robotframework_ls.impl.text_utilities import normalize_robot_name

    normalized_name = normalize_robot_name(keyword_found.keyword_name)
    if normalized_name in self._name_to_keyword:
        # Same origin (same library or same resource, and same alias):
        # treat as a re-registration of the same keyword and ignore it.
        #
        # NOTE(review): when comparing two *library* keywords, both sides'
        # resource_name may be None, making the `resource_name ==
        # resource_name` comparison trivially True — which would suppress
        # multiple-definition tracking for same-named keywords from two
        # different libraries with equal (e.g. None) aliases. Confirm this
        # is intended.
        if (self._name_to_keyword[normalized_name].library_name == keyword_found.library_name or self._name_to_keyword[normalized_name].resource_name == keyword_found.resource_name) \
                and self._name_to_keyword[normalized_name].library_alias == keyword_found.library_alias:
            return
        # First conflict for this name: seed the list with the original.
        if normalized_name not in self._multiple_keywords:
            self._multiple_keywords[normalized_name] = [
                self._name_to_keyword[normalized_name]
            ]
        self._multiple_keywords[normalized_name].append(keyword_found)
    else:
        self._name_to_keyword[normalized_name] = keyword_found
    # Names containing '{' may have embedded arguments; track separately.
    if "{" in normalized_name:
        self._names_with_variables.add(normalized_name)
def __init__(self, filter_text):
    """Store the normalized form of *filter_text* for later name matching."""
    normalized = normalize_robot_name(filter_text)
    self.filter_text = normalized
def is_same_robot_name(self, word):
    """True when *word* normalizes to exactly the stored filter text."""
    normalized = normalize_robot_name(word)
    return normalized == self.filter_text
def accepts_keyword_name(self, word):
    """True when the filter is empty or is a substring of the normalized *word*."""
    if self.filter_text:
        return self.filter_text in normalize_robot_name(word)
    # An empty filter accepts everything.
    return True