Example #1
@classmethod
def get_default_tokenizer(cls):
    # letters, digits, '_' and '.' all count as word characters;
    # BOF/EOF sentinels bracket the token stream
    return tokenizers.create_shlex_tokenizer(
        with_bof=True,
        with_eof=True,
        wordchars=string.ascii_uppercase + string.ascii_lowercase +
        string.digits + '_.',
    )
Example #2
def test_basic_tokenizer_yields_token_and_lookahead():
    tokenizer = tokenizers.create_shlex_tokenizer()

    # empty input produces no tokens at all
    tokens = list(ShiftReduceParser.tokenize_with_lookahead(tokenizer, ''))
    assert len(tokens) == 0

    # each token is paired with the token that follows it;
    # the last token's lookahead is None
    tokens = list(ShiftReduceParser.tokenize_with_lookahead(
        tokenizer, 'a b c'))
    assert len(tokens) == 3
    assert tokens[0] == ('a', 'b')
    assert tokens[1] == ('b', 'c')
    assert tokens[2] == ('c', None)
Example #3
def test_tokenizer_with_bof_yields_token_and_lookahead():
    tokenizer = tokenizers.create_shlex_tokenizer(with_bof=True)

    # with_bof=True prepends a BOF sentinel, so even empty input
    # yields one (BOF_VALUE, None) pair
    tokens = list(ShiftReduceParser.tokenize_with_lookahead(tokenizer, ''))
    assert len(tokens) == 1
    assert tokens[0] == (BOF_VALUE, None)

    # the sentinel's lookahead is the first real token
    tokens = list(ShiftReduceParser.tokenize_with_lookahead(
        tokenizer, 'a b c'))
    assert len(tokens) == 4
    assert tokens[0] == (BOF_VALUE, 'a')
    assert tokens[1] == ('a', 'b')
    assert tokens[2] == ('b', 'c')
    assert tokens[3] == ('c', None)
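Examples #2 and #3 pin down the contract of ShiftReduceParser.tokenize_with_lookahead: each token is paired with the token that follows it, and the final pair carries None as its lookahead. Below is a minimal sketch of that pairing, assuming only that the tokenizer is a callable mapping a string to an iterable of tokens; the standalone function name and signature here are hypothetical, inferred from the tests, not the library's actual code.

from itertools import chain, islice, tee


def tokenize_with_lookahead(tokenizer, text):
    # Hypothetical re-implementation inferred from the tests above;
    # the library's real method may differ.
    current, ahead = tee(tokenizer(text))
    # Advance the second copy by one token and pad it with a final None.
    lookahead = chain(islice(ahead, 1, None), [None])
    return zip(current, lookahead)


# str.split stands in for a real tokenizer; the pairs match Example #2.
assert list(tokenize_with_lookahead(str.split, 'a b c')) == [
    ('a', 'b'), ('b', 'c'), ('c', None)]
assert list(tokenize_with_lookahead(str.split, '')) == []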
Example #4
@classmethod
def get_default_tokenizer(cls):
    # same word characters as Example #1, but no BOF/EOF sentinels
    return create_shlex_tokenizer(
        wordchars=string.ascii_uppercase + string.ascii_lowercase +
        string.digits + '_.',
    )
Example #5
@classmethod
def get_default_tokenizer(cls):
    # only digits and '.' form words, so this tokenizer targets
    # numeric input
    return tokenizers.create_shlex_tokenizer(
        with_bof=True,
        with_eof=True,
        wordchars=string.digits + '.',
    )
Example #6
@classmethod
def get_default_tokenizer(cls):
    # same numeric word characters as Example #5, without the sentinels
    return create_shlex_tokenizer(wordchars=string.digits + '.')
Example #7
@classmethod
def get_default_tokenizer(cls):
    # all defaults: no BOF/EOF sentinels, standard word characters
    return tokenizers.create_shlex_tokenizer()
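All seven examples configure the same factory through three keyword arguments: wordchars sets which characters the lexer treats as part of a word, while with_bof and with_eof add begin/end sentinels to the token stream. Below is a minimal sketch of such a factory built on the standard library's shlex module; the sentinel objects and everything about the factory's internals are assumptions inferred from the call sites above, not the library's actual implementation.

import shlex
import string

BOF_VALUE = object()   # hypothetical sentinels standing in for the
EOF_VALUE = object()   # library's real BOF/EOF marker objects


def create_shlex_tokenizer(with_bof=False, with_eof=False, wordchars=None):
    # Sketch of what a factory like this might look like.
    def tokenize(text):
        lexer = shlex.shlex(text)
        if wordchars is not None:
            lexer.wordchars = wordchars
        if with_bof:
            yield BOF_VALUE
        # a shlex.shlex instance is itself an iterator over its tokens
        yield from lexer
        if with_eof:
            yield EOF_VALUE

    return tokenize


# Digits plus '.' as word characters keep '3.14' together as one token,
# mirroring Example #6:
tokenizer = create_shlex_tokenizer(wordchars=string.digits + '.')
assert list(tokenizer('3.14 2.0')) == ['3.14', '2.0']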