class Parser(PARSER):
    # Fixture: NamedGroupLexer with token converters (float/int/identity/str
    # slice/constant) plus line()/column() position tracking in POSITIONS.
    # NOTE(review): this class had been collapsed onto one physical line,
    # which is a Python syntax error (`verbose = VERBOSE` fused after the
    # assignment) and breaks TPG's `$ code`-to-end-of-line sections; the
    # original line breaks are restored below — verify against upstream tests.
    __doc__ = r"""
        set lexer = %(LEXER)s
        set lexer = NamedGroupLexer

        token float '\d+\.\d+' $ float
        token int '\d+' int
        token text '\w+' ;
        token str '".*?"' $ lambda s: s[1:-1]
        token brackets '\{(.|\n)*?\}' "{}"

        separator spaces '\s+' ;
        separator comments '\(.*?\)' ;

        START/$t, type(t)$ ->
            ( float/t | int/t | text/t | str/t | brackets/t )
            ;

        POSITIONS/lst ->
            $ lst = []
            (
                $ line, column = self.line(), self.column()
                (   text text int   $ lst.append(None)
                |   text int        $ lst.append(None)
                |   text/t          $ lst.append((t, line, column))
                |   brackets/t      $ lst.append((t, line, column))
                )
            )*
            ;
    """ % tpg.Py()
    verbose = VERBOSE
class Parser(PARSER):
    # Fixture: nested backtracking over single-character alternatives; START
    # returns which permutation (1..11) matched.
    # NOTE(review): the class had been collapsed onto one physical line, which
    # breaks TPG's `$ n = k`-to-end-of-line code sections (each would swallow
    # the rest of the grammar line); line breaks restored below.
    __doc__ = r"""
        set lexer = %(LEXER)s
        set word_boundary = False

        START/n ->
            (   'a' 'b' 'c' 'd'     $ n = 1
            |   'a' 'b' 'd' 'c'     $ n = 2
            |   'a'
                (   'c' 'b' 'd'     $ n = 3
                |   'c' 'd' 'b'     $ n = 4
                |   'd'
                    (   'b' 'c'     $ n = 5
                    |   'c' 'b'     $ n = 6
                    )
                )
            |   'b'
                (   'a'
                    (   'c' 'd'     $ n = 7
                    |   'd' 'c'     $ n = 8
                    )
                |   'c'
                    (   'a' 'd'     $ n = 9
                    |   'd' 'a'     $ n = 10
                    )
                )
            |   'e'                 $ n = 11
            )
            ;
    """ % tpg.Py()
# Fixture: rule arguments — START forwards positional, *args and **kws
# arguments to S, and both return them unchanged via the <...>/<...> syntax.
# The %(LEXER)s placeholder is filled from the mapping returned by tpg.Py()
# (presumably selecting the lexer under test; set by the enclosing harness).
class Parser(PARSER): __doc__ = r""" set lexer = %(LEXER)s separator spaces '\s+' ; START<a, b, *args, **kws>/<a, b, args, kws> -> S<a, b, *args, **kws>/<a, b, args, kws> ; S<a, b, *args, **kws>/<a, b, args, kws> -> ; """%tpg.Py()
# Fixture: selectable axiom — three empty rules (START, SYMBOL1, SYMBOL2),
# each returning a string naming itself, so the harness can check which rule
# is used as the axiom when parsing starts from it.
class Parser(PARSER): __doc__ = r""" set lexer = %(LEXER)s token word '\w+' ; START/"Axiom == START" -> ; SYMBOL1/"Axiom == SYMBOL1" -> ; SYMBOL2/"Axiom == SYMBOL2" -> ; """%tpg.Py()
class Parser(PARSER):
    # Fixture: empty alternative `( )` among non-empty ones in rule D.
    # NOTE(review): the class had been collapsed onto one physical line; TPG
    # grammar comments (`# ...`) run to end of line, so on a single line the
    # first `#` swallowed the rest of the grammar. Line breaks restored.
    __doc__ = r"""
        set lexer = %(LEXER)s

        D ->
            (   x       # not empty
            |   ( )     # empty !!!
            |   x y     # not empty
            )
            ;
    """ % tpg.Py()
# Fixture: unicode lexer — token classes include curly single/double quotes
# (set lexer_unicode = True); START extracts the word between matching
# quote pairs, including the '`'/'´' backtick/acute pair.
class Parser(PARSER): __doc__ = r""" set lexer = %(LEXER)s set lexer_unicode = True token single_quote '[‘’]' ; token double_quote '["“”]' ; token word '\w+' ; START/x -> double_quote word/x double_quote | single_quote word/x single_quote | '`' word/x '´' ; """%tpg.Py()
# Fixture: the `check` keyword and self.check()/self.error() methods — seven
# POSITIVE_n variants that accept an int only when it is > 0, each using a
# different error-reporting/fallback style (error string, formatted error,
# self.error(), or silently setting i to None). All `$...$` code sections
# here are delimiter-paired, so the single-line grammar form is well-formed.
class Parser(PARSER): __doc__ = r""" set lexer = %(LEXER)s separator spaces '\s+' ; token int '\-?\d+' int ; POSITIVE_1/i -> int/i check $i>0$ ; POSITIVE_2/i -> int/i $self.check(i>0)$ ; POSITIVE_3/i -> int/i ( check $i>0$ | error "i <= 0" ) ; POSITIVE_4/i -> int/i ( check $i>0$ | error $"%%d <= 0"%%i$ ) ; POSITIVE_5/i -> int/i ( check $i>0$ | $self.error("%%d <= 0"%%i)$ ); POSITIVE_6/i -> int/i ( check $i>0$ | $i=None$ ) ; POSITIVE_7/i -> int/i ( $self.check(i>0)$ | $i=None$ ) ; """%tpg.Py()
class Parser(PARSER):
    # Fixture: rule arguments of different kinds — identifier (local `foo`),
    # integer literal, string literal, and inline `$...$` code expression —
    # plus return values that are literals or code expressions.
    # NOTE(review): the class had been collapsed onto one physical line, which
    # breaks the end-of-line `$ foo = "*foo*"` code section; breaks restored.
    __doc__ = r"""
        set lexer = %(LEXER)s

        START/x ->
            $ foo = "*foo*"
            (   "1" IDENT<foo>/x1 IDENT<314>/x2 $ x = x1, x2 $
            |   "2" STRING<"2">/x
            |   "3" CODE<$"a"+"b"$>/x
            )
            ;

        IDENT<i>/i -> ;
        STRING<s>/"a string" -> check $s=="2"$ ;
        CODE<x>/$1+2$ -> check $x=="ab"$ ;
    """ % tpg.Py()
class NotDotAll(PARSER):
    # Fixture: lexer_dotall = False — '.' must NOT match newlines, so START
    # counts '.'-matched chars (c) and explicit '\n' tokens (n) separately.
    # NOTE(review): the collapsed one-line form was a Python syntax error
    # (`verbose = VERBOSE` fused onto the assignment) and broke the
    # end-of-line `$` code sections; structure restored below.
    __doc__ = r"""
        set lexer = %(LEXER)s
        set lexer_dotall = False

        token ch '.' ;
        token nl '\n' ;

        START/$c,n$ ->
            $ c, n = 0, 0
            (   ch  $ c += 1
            |   nl  $ n += 1
            )*
            ;
    """ % tpg.Py()
    verbose = VERBOSE
class Parser(PARSER):
    # Fixture: position bookkeeping — compares self.line()/self.column()
    # (current position, with and without a mark argument) against the
    # .line/.column attributes of a `@t` mark, around a dotall token.
    # NOTE(review): the class had been collapsed onto one physical line,
    # breaking the end-of-line `$ lst...` code sections; breaks restored.
    __doc__ = r"""
        set lexer = %(LEXER)s
        set lexer_dotall = True

        separator spaces '\s+' ;
        token tok "\(.*?\)" ;

        START/lst ->
            $ lst = []
            (
                $ lst.append((self.line(), self.column()))
                @t
                $ lst.append((t.line, t.column))
                tok
                $ lst.append((self.line(t), self.column(t)))
                @t
                $ lst.append((t.line, t.column))
            )*
            ;
    """ % tpg.Py()
# Fixture: repetition operators — one counting rule per form: `*`, `+`, `?`,
# and the bounded repetitions `{}`, `{3}`, `{,}`, `{,3}`, `{2,}`, `{2,5}`.
# All `$...$` code sections are delimiter-paired, so the single-line grammar
# form is well-formed.
class Parser(PARSER): __doc__ = r""" set lexer = %(LEXER)s separator spaces '\s+' ; token w '\w+' ; STAR/n -> $n=0$ ( w $n=n+1$ )* ; PLUS/n -> $n=0$ ( w $n=n+1$ )+ ; QUES/n -> $n=0$ ( w $n=n+1$ )? ; REP1/n -> $n=0$ ( w $n=n+1$ ){} ; REP2/n -> $n=0$ ( w $n=n+1$ ){3} ; REP3/n -> $n=0$ ( w $n=n+1$ ){,} ; REP4/n -> $n=0$ ( w $n=n+1$ ){,3} ; REP5/n -> $n=0$ ( w $n=n+1$ ){2,} ; REP6/n -> $n=0$ ( w $n=n+1$ ){2,5} ; """%tpg.Py()
class Parser(PARSER):
    # Fixture: marks and self.extract() — collects the source text between an
    # inner mark pair for each parenthesised word group, then the text between
    # the outer marks for the whole input.
    # NOTE(review): the class had been collapsed onto one physical line,
    # breaking the end-of-line `$ lst...` code sections; breaks restored.
    __doc__ = r"""
        set lexer = %(LEXER)s
        separator spaces '\s+' ;

        START/lst ->
            $ lst = []
            @start
            (   '\(' @start1 '\w+'* @stop1 '\)'
                $ lst.append(self.extract(start1, stop1))
            )*
            @stop
            $ lst.append(self.extract(start, stop))
            ;
    """ % tpg.Py()
class NotIgnoreCase(PARSER):
    # Fixture: lexer_ignorecase = False — token `a` matches lowercase only,
    # token `b` matches uppercase 'B' only; START collects matched texts.
    # NOTE(review): the collapsed one-line form was a Python syntax error
    # (`verbose = VERBOSE` fused onto the assignment) and broke the
    # end-of-line `$` code sections; structure restored below.
    __doc__ = r"""
        set lexer = %(LEXER)s
        set lexer_ignorecase = False

        separator spaces '\s+' ;
        token a 'a' ;
        token b 'B' ;

        START/lst ->
            $ lst = []
            (   a/t $ lst.append(t)
            |   b/t $ lst.append(t)
            )*
            ;
    """ % tpg.Py()
    verbose = VERBOSE
class NotVerbose(PARSER):
    # Fixture: lexer_verbose = False — tokens "foo bar", "foo", "bar";
    # START records 1/2/3 per token matched.
    # NOTE(review): the collapsed one-line form was a Python syntax error
    # (`verbose = VERBOSE` fused onto the assignment) and broke the
    # end-of-line `$` code sections; structure restored below.
    __doc__ = r"""
        set lexer = %(LEXER)s
        set lexer_verbose = False

        token foobar "foo bar" ;
        token foo "foo" ;
        token bar "bar" ;
        separator spaces '\s+' ;

        START/lst ->
            $ lst = []
            (   foobar  $ lst.append(1)
            |   foo     $ lst.append(2)
            |   bar     $ lst.append(3)
            )*
            ;
    """ % tpg.Py()
    verbose = VERBOSE
class NotWordBounded(PARSER):
    # Fixture: word_boundary = False — single-char tokens `a`/`b` may match
    # inside longer words instead of the catch-all `w`; START collects texts.
    # NOTE(review): the collapsed one-line form was a Python syntax error
    # (`verbose = VERBOSE` fused onto the assignment) and broke the
    # end-of-line `$` code sections; structure restored below.
    __doc__ = r"""
        set lexer = %(LEXER)s
        set word_boundary = False

        separator spaces '\s+' ;
        token a 'a' ;
        token b 'b' ;
        token w '\w+' ;

        START/lst ->
            $ lst = []
            (   a/t $ lst.append(t)
            |   b/t $ lst.append(t)
            |   w/t $ lst.append(t)
            )*
            ;
    """ % tpg.Py()
    verbose = VERBOSE
class NotMultiline(PARSER):
    # Fixture: lexer_multiline = False — '^' and '$' anchors in token regexes
    # ('^b', 'e$') only apply at the very start/end of the input; START counts
    # begin-anchored, end-anchored and plain word-char matches.
    # NOTE(review): the collapsed one-line form was a Python syntax error
    # (`verbose = VERBOSE` fused onto the assignment) and broke the
    # end-of-line `$` code sections; structure restored below.
    __doc__ = r"""
        set lexer = %(LEXER)s
        set lexer_multiline = False

        token b '^b' ;
        token e 'e$' ;
        token w '\w' ;
        separator spaces '\s+' ;

        START/$nb,nw,ne$ ->
            $ nb, nw, ne = 0, 0, 0
            (   b $ nb += 1
            |   e $ ne += 1
            |   w $ nw += 1
            )*
            ;
    """ % tpg.Py()
    verbose = VERBOSE
class Verbose(PARSER):
    # Fixture: lexer_verbose = True with tokens whose regexes are written in
    # triple-quoted strings (one per quote style, hence the three-part string
    # concatenation so each regex can contain the other quoting style).
    # NOTE(review): the collapsed one-line form was a Python syntax error
    # (`verbose = VERBOSE` fused onto the assignment) and broke the
    # end-of-line `$` code sections; structure restored below. Regex token
    # definitions are reproduced byte-for-byte.
    __doc__ = (r"""
        set lexer = %(LEXER)s
        set lexer_verbose = True

        token foobar "foo bar" ;
        token foo "foo" ;
        token bar "bar" ;
        token triple_quoted_1 ''' "{3} ( \\. | "{0,2} [^"\\]+ )* "{3} ''' ;
    """ + r'''
        token triple_quoted_2 """ '{3} ( \\. | '{0,2} [^'\\]+ )* '{3} """ ;
    ''' + r"""
        separator spaces '\s+' ;

        START/lst ->
            $ lst = []
            (   foobar          $ lst.append(1)
            |   foo             $ lst.append(2)
            |   bar             $ lst.append(3)
            |   triple_quoted_1 $ lst.append(4)
            |   triple_quoted_2 $ lst.append(5)
            )*
            ;
    """) % tpg.Py()
    verbose = VERBOSE
class WordBounded2(PARSER):
    # Fixture: word_boundary = True with tokens that mix word chars and ':'
    # ('abc', 'def:', ':ghi') — checks how word boundaries apply when a token
    # starts or ends with a non-word character; `other` swallows the rest.
    # NOTE(review): the collapsed one-line form was a Python syntax error
    # (`verbose = VERBOSE` fused onto the assignment) and broke the
    # end-of-line `$` code sections; structure restored below.
    __doc__ = r"""
        set lexer = %(LEXER)s
        set word_boundary = True

        separator spaces '\s+' ;
        token abc 'abc' ;
        token def 'def:' ;
        token ghi ':ghi' ;
        token other '\w+|:' ;

        START/lst ->
            $ lst = []
            (   abc/t $ lst.append(t)
            |   def/t $ lst.append(t)
            |   ghi/t $ lst.append(t)
            |   other
            )*
            ;
    """ % tpg.Py()
    verbose = VERBOSE