Example #1
 def __init__(self, bnf_grammar):
     self._bnf_grammar = bnf_grammar
     self.generator = tokenize(
         bnf_grammar,
         version_info=parse_version_string('3.6')
     )
     self._gettoken()  # Initialize lookahead
Example #2
def _(source, source_file):
    source_string = python_bytes_to_unicode(source)
    for token in tokenize(
        source_string,
        version_info=parse_version_string('3.10')
    ):
        pass  # tokens are discarded; the loop only drives the tokenizer
Example #3
 def __init__(self, bnf_text, token_namespace):
     self._bnf_text = bnf_text
     self.generator = tokenize.tokenize(
         bnf_text, version_info=parse_version_string('3.6'))
     self._gettoken()  # Initialize lookahead
     self.dfas, self.startsymbol = self._parse()
     self.first = {}  # map from symbol name to set of tokens
     self._addfirstsets()
     self._token_namespace = token_namespace
Example #4
def tokenize_code(code, inference=False):
    if inference:
        return [
            token_filter(token.type, token.string)
            for token in tokenize_without_endmarker(code)
        ]
    return [
        token_filter(token.type, token.string)
        for token in tokenize(code, (2, 7))
    ]
Example #5
def tokenize_without_endmarker(code):
    safeword = 'ZZZ_USER_WANTS_TO_COMPLETE_HERE'
    for token in tokenize(code + safeword, (2, 7)):
        if token.string == safeword:
            return
        elif token.string.endswith(safeword):
            yield PythonToken(token.type, token.string[:-len(safeword)],
                              token.start_pos, token.prefix)
            return
        else:
            yield token
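The helper above tokenizes an incomplete line by appending a sentinel word and trimming it back out of the last token before yielding. A minimal usage sketch, assuming the tokenize_without_endmarker definition above and its parso imports (the unfinished source string is made up for illustration):

for token in tokenize_without_endmarker('isinstance(foo, '):
    # The sentinel never reaches the caller; only the user's code is tokenized.
    print(token.type, repr(token.string), token.start_pos)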
Example #6
 def __init__(self, bnf_text, token_namespace):
     self._bnf_text = bnf_text
     self.generator = tokenize.tokenize(
         bnf_text,
         version_info=parse_version_string('3.6')
     )
     self._gettoken()  # Initialize lookahead
     self.dfas, self.startsymbol = self._parse()
     self.first = {}  # map from symbol name to set of tokens
     self._addfirstsets()
     self._token_namespace = token_namespace
Example #7
 def _tokenize(self, code):
     # Used by Jedi.
     return tokenize(code, self.version_info)
Example #8
def _get_token_list(string):
    # Load the current version.
    version_info = parse_version_string()
    return list(tokenize.tokenize(string, version_info))
Example #9
def test_tokenize_start_pos(code, positions):
    tokens = list(tokenize(code, version_info=(3, 6)))
    assert positions == [p.start_pos for p in tokens]
Example #10
def tokenize_code(code):
    return [(token.type, token.string if token.type != 3 else '')
            for token in tokenize(code, (2, 7))]
Example #11
from parso.python.tokenize import tokenize
import json
import sys

with open(sys.argv[1], 'r') as file:
    file_content = file.read()

tokens = []

for token in tokenize(file_content, version_info=(3, 6)):
    if str(token.string) != "":
        tmp = dict()
        tmp["line"] = token.start_pos[0]
        tmp["char"] = token.start_pos[1] + 1
        # str(token.type) looks like "PythonTokenTypes.NAME"; slicing off the
        # first 17 characters drops the "PythonTokenTypes." prefix.
        tmp["type"] = str(token.type)[17:].strip(")")
        if tmp["type"] == "OP":
            tmp["type"] += "-" + str(token.string)
        tmp["value"] = str(token.string)
        tokens.append(tmp)

print(json.dumps(tokens, indent=4, sort_keys=True))
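Across the examples above the call pattern is the same: hand parso's tokenize the source text plus a (major, minor) version tuple and iterate over the PythonToken objects it yields. A minimal standalone sketch, assuming a parso release with the tokenize(code, version_info) signature used throughout this listing:

from parso.python.tokenize import tokenize

# Any Python source string works here; this one is made up for illustration.
code = "answer = 40 + 2\n"
for token in tokenize(code, version_info=(3, 6)):
    # Each PythonToken exposes type, string, start_pos and prefix,
    # the attributes the examples above rely on.
    print(token.type, repr(token.string), token.start_pos)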