Code example #1
    def tokenize(self, text, language):
        """
        Return all tokens in `text` and all keywords associated by
        Pygments to `language`.
        """
        # Get language keywords provided by Pygments
        try:
            lexer = get_lexer_by_name(language)
            keywords = get_keywords(lexer)
        except Exception:
            keywords = []
        keyword_set = set(keywords)
        keywords = [{
            'kind': CompletionItemKind.KEYWORD,
            'insertText': keyword,
            'sortText': u'zz{0}'.format(keyword[0].lower()),
            'filterText': keyword,
            'documentation': ''
        } for keyword in keywords]
        # logger.debug(keywords)
        # tokens = list(lexer.get_tokens(text))
        # logger.debug(tokens)
        # Get file tokens
        tokens = get_words(text, language)
        tokens = [{
            'kind': CompletionItemKind.TEXT,
            'insertText': token,
            'sortText': u'zz{0}'.format(token[0].lower()),
            'filterText': token,
            'documentation': ''
        } for token in tokens]
        # Append file tokens that are not already keywords
        for token in tokens:
            if token['insertText'] not in keyword_set:
                keywords.append(token)
        return keywords
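
As a small standalone illustration (not part of the original method), this is the shape of the completion items the first version builds. The CompletionItemKind values below follow the LSP numbering and are only a stand-in; the u'zz' prefix in sortText presumably pushes these generic suggestions towards the end of a client's sorted list.

from enum import IntEnum

# Illustrative stand-in for the real CompletionItemKind constants,
# using the LSP numeric values.
class CompletionItemKind(IntEnum):
    TEXT = 1
    KEYWORD = 14

keyword = 'while'
item = {
    'kind': CompletionItemKind.KEYWORD,
    'insertText': keyword,
    'sortText': u'zz{0}'.format(keyword[0].lower()),  # -> 'zzw'
    'filterText': keyword,
    'documentation': ''
}
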
Code example #2
    def tokenize(self, text, offset, language, current_word):
        """
        Return all tokens in `text` and all keywords associated by
        Pygments to `language`.
        """
        valid = is_prefix_valid(text, offset, language)
        if not valid:
            return []

        # Get language keywords provided by Pygments
        try:
            lexer = get_lexer_by_name(language)
            keywords = get_keywords(lexer)
        except Exception:
            keywords = []
        keyword_set = set(keywords)
        keywords = [{
            'kind': CompletionItemKind.KEYWORD,
            'insertText': keyword,
            'label': keyword,
            'sortText': keyword,
            'filterText': keyword,
            'documentation': '',
            'provider': FALLBACK_COMPLETION
        } for keyword in keywords]

        # Get file tokens
        tokens = get_words(text, offset, language)
        tokens = [{
            'kind': CompletionItemKind.TEXT,
            'insertText': token,
            'label': token,
            'sortText': token,
            'filterText': token,
            'documentation': '',
            'provider': FALLBACK_COMPLETION
        } for token in tokens]
        for token in tokens:
            if token['insertText'] not in keyword_set:
                keywords.append(token)

        # Filter matching results
        if current_word is not None:
            current_word = current_word.lower()
            keywords = [
                k for k in keywords if current_word in k['insertText'].lower()
            ]

        return keywords
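
A standalone sketch (the sample words are chosen here purely for illustration) of the final filtering step in the version above: completions survive only when `current_word` is a case-insensitive substring of their insertText.

completions = [
    {'insertText': 'import'},
    {'insertText': 'Implements'},
    {'insertText': 'return'},
]
current_word = 'IMP'.lower()
# Case-insensitive substring match, as in the method above
filtered = [c for c in completions if current_word in c['insertText'].lower()]
assert [c['insertText'] for c in filtered] == ['import', 'Implements']
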
Code example #3
    def tokenize(self, text, offset, language):
        """
        Return all tokens in `text` and all keywords associated by
        Pygments to `language`.
        """
        # Bail out early if the text before `offset` is not a valid
        # completion prefix
        valid = is_prefix_valid(text, offset, language)
        if not valid:
            return []
        # Get language keywords provided by Pygments
        try:
            lexer = get_lexer_by_name(language)
            keywords = get_keywords(lexer)
        except Exception:
            keywords = []
        keyword_set = set(keywords)
        keywords = [{
            'kind': CompletionItemKind.KEYWORD,
            'insertText': keyword,
            'label': keyword,
            'sortText': keyword,
            'filterText': keyword,
            'documentation': '',
            'provider': FALLBACK_COMPLETION
        } for keyword in keywords]
        # logger.debug(keywords)
        # tokens = list(lexer.get_tokens(text))
        # logger.debug(tokens)
        # Get file tokens
        tokens = get_words(text, offset, language)
        tokens = [{
            'kind': CompletionItemKind.TEXT,
            'insertText': token,
            'label': token,
            'sortText': token,
            'filterText': token,
            'documentation': '',
            'provider': FALLBACK_COMPLETION
        } for token in tokens]
        # Append file tokens that are not already keywords
        for token in tokens:
            if token['insertText'] not in keyword_set:
                keywords.append(token)
        return keywords
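
A standalone sketch of the merge step shared by all three versions: file tokens are appended only when they are not already Pygments keywords, so each word appears at most once in the returned list.

keyword_set = {'return', 'for'}
keywords = [{'insertText': 'return'}, {'insertText': 'for'}]
tokens = [{'insertText': 'result'}, {'insertText': 'return'}]

# Only 'result' is appended; 'return' is already present as a keyword.
for token in tokens:
    if token['insertText'] not in keyword_set:
        keywords.append(token)

assert [k['insertText'] for k in keywords] == ['return', 'for', 'result']
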
Code example #4
def test_get_words():
    source = 'foo bar123 baz car456'
    tokens = get_words(source, 5, 'python')
    assert set(tokens) == {'foo', 'baz', 'car456'}
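
The test above suggests that get_words drops the word containing `offset`: offset 5 falls inside 'bar123', the only word missing from the expected set. Below is a minimal sketch that would satisfy this test, assuming a plain \w+ tokenizer and ignoring language-specific rules; it is an illustration, not the project's actual implementation.

import re

def get_words(text, offset, language):
    """Illustrative sketch: collect identifier-like words from `text`,
    skipping the word that contains `offset` (the word being typed).
    `language` is unused in this simplified version."""
    words = []
    for match in re.finditer(r'\w+', text):
        if match.start() <= offset <= match.end():
            continue  # skip the word under the cursor
        words.append(match.group())
    return words
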