Example #1
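# Assumed context, not shown in this snippet: eq_ and ok_ come from nose.tools;
# make_tokenizer, Token, and LexerError from funcparserlib.lexer; tok, skip, and
# many from funcparserlib.parser (see the import in Example #2 below). Spec and
# ParserError are presumably provided by the funcparserlib version this test targets.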
def test_error_info():
    tokenize = make_tokenizer([
        Spec('keyword', r'(is|end)'),
        Spec('id', r'[a-z]+'),
        Spec('space', r'[ \t]+'),
        Spec('nl', r'[\n\r]+'),
    ])
    try:
        list(tokenize('f is ф'))
    except LexerError as e:
        pass
    else:
        ok_(False, 'must raise LexerError')

    keyword = lambda s: tok('keyword', s)

    id = tok('id')
    is_ = keyword('is')
    end = keyword('end')
    nl = tok('nl')

    equality = id + skip(is_) + id >> tuple
    expr = equality + skip(nl)
    file = many(expr) + end

    msg = """\
rake is eggs
eggs isnt spam
end"""
    toks = [x for x in tokenize(msg) if x.type != 'space']
    try:
        file.parse(toks)
    except ParserError as e:
        msg, pos, i = e.args
        eq_(msg, "got unexpected token: id 'spam'")
        eq_(pos, ((2, 11), (2, 14)))
        # May raise IndexError if i is out of range
        t = toks[i]
        eq_(t, Token('id', 'spam'))
    else:
        ok_(False, 'must raise ParserError')
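
For context, here is a minimal sketch, independent of funcparserlib, of why the expected position is ((2, 11), (2, 14)): the unexpected token 'spam' occupies columns 11 through 14 (1-based, inclusive) on line 2 of the parsed text.

line_2 = "eggs isnt spam"          # line 2 of msg above
start = line_2.index("spam") + 1   # 1-based start column -> 11
end = start + len("spam") - 1      # inclusive end column  -> 14
assert (start, end) == (11, 14)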
Example #2

from funcparserlib.parser import skip, tok
from functools import reduce

__all__ = [
    'const', 'flatten', 'unarg', 'tokval', 'mktok', 'n', 'op', 'op_', 'sometok',
    'sometoks',
]

# Well-known functions
const = lambda x: lambda _: x
flatten = lambda list: sum(list, [])
unarg = lambda f: lambda args: f(*args)

# Auxiliary functions for lexers
tokval = lambda tok: tok.value

# Auxiliary functions for parsers
mktok = lambda type: lambda value: tok(type, value) >> tokval
n = mktok('name')
op = mktok('op')
op_ = lambda s: skip(op(s))
sometok = lambda type: tok(type) >> tokval

sometoks = lambda types: reduce(
    lambda p, type: (p | tok(type)) if p else tok(type),
    types,
    None) >> tokval
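
As a quick sanity check, the following minimal sketch shows how the generic helpers above behave. The definitions of const, flatten, and unarg are repeated (with a renamed parameter) so the snippet runs on its own; the funcparserlib-specific helpers (tokval, mktok, sometok, sometoks) are left out.

const = lambda x: lambda _: x            # ignore the argument, always return x
flatten = lambda lst: sum(lst, [])       # concatenate a list of lists
unarg = lambda f: lambda args: f(*args)  # adapt f(a, b) to accept a single tuple

assert const(42)('anything') == 42
assert flatten([[1, 2], [3], []]) == [1, 2, 3]
assert unarg(lambda a, b: a + b)((1, 2)) == 3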

Example #3
# Issue 14
def test_error_info():
    tokenize = make_tokenizer([
        Spec('keyword', r'(is|end)'),
        Spec('id', r'[a-z]+'),
        Spec('space', r'[ \t]+'),
        Spec('nl', r'[\n\r]+'),
    ])
    try:
        list(tokenize(u'f is ф'))
    except LexerError as e:
        eq_(unicode(e), u'1,6-1,6: cannot tokenize data: "f is \u0444"')
    else:
        ok_(False, 'must raise LexerError')

    keyword = lambda s: tok('keyword', s)

    id = tok('id')
    is_ = keyword('is')
    end = keyword('end')
    nl = tok('nl')

    equality = id + skip(is_) + id >> tuple
    expr = equality + skip(nl)
    file = many(expr) + end

    msg = """\
spam is eggs
eggs isnt spam
end"""
    toks = [x for x in tokenize(msg) if x.type != 'space']
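
For context, a minimal check, independent of funcparserlib, of why the lexer error above points at 1,6-1,6: the character 'ф', which none of the specs match, is the sixth character of the input.

data = u'f is ф'
assert data.index(u'ф') + 1 == 6   # 1-based column of the offending character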