# --- Token-based EBNF grammar (pn_* variant: parses a pre-lexed token stream) ---

# concatenation: <item> , <rest-of-rhs>  (right recursion via TRef, flattened
# back into one node by flat=True / flat_eq_name=True)
pn_concatenation = Node(
    "pn_concatenation",
    Seq(pn_ident_or_term_or_pn_groups,
        NotNeed(","),
        TRef("pn_rhs", grammar=cur_grammar, flat=True)),
    flat_eq_name=True,
    grammar=cur_grammar,
)

# alteration: <item> | <rest-of-rhs>
pn_alteration = Node(
    "pn_alteration",
    Seq(pn_ident_or_term_or_pn_groups,
        NotNeed("|"),
        TRef("pn_rhs", grammar=cur_grammar, flat=True)),
    flat_eq_name=True,
    grammar=cur_grammar,
)

# rhs: ordered choice -- longer alternatives first so a bare item matches last
pn_rhs = Node(
    "pn_rhs",
    Or(pn_concatenation, pn_alteration, pn_groups, pn_ident_or_term),
    grammar=cur_grammar,
)

# rule: lhs = rhs ;   ("=" and ";" are consumed but not kept in the tree)
pn_rule = Node(
    "pn_rule",
    Seq(pn_lhs, NotNeed("="), pn_rhs, NotNeed(";")),
    grammar=cur_grammar,
)

# grammar: zero or more rules
pn_grammar = Node("ebnf_grammar", ZeroOrMore(pn_rule), grammar=cur_grammar)

if __name__ == '__main__':
    # SetRecursionLimit(5000)  # lexing with tokens reduces recursion level
    # BUG FIX: `token_reader` was referenced below but only defined in
    # commented-out code, so running this script raised NameError.
    # Reconstructed from the commented single-line construction.
    # NOTE(review): assumes tt_list / tt_skip_list are defined earlier in
    # this file -- confirm.
    with open("test1.txt") as src:
        token_reader = TokenReader(
            TokenIterator(TextReader(src), tt_list, tt_skip_list))
        rslt = pn_grammar.read_from(token_reader)
    thing_pprint(rslt.readedlist)
# --- Character-based EBNF grammar (reads raw text; whitespace/comments skipped) ---

# NOTE(review): the head of this definition was truncated in the source
# (the line began with the orphaned tail ", flat_eq_name=True, ...").
# Reconstructed from the parallel pn_concatenation definition -- confirm.
concatenation = Node(
    "concatenation",
    Seq(ident_or_term_or_groups,
        NotNeed(","),
        TRef("rhs", grammar=cur_grammar, flat=True)),
    flat_eq_name=True,
    grammar=cur_grammar,
)

# alteration: <item> | <rest-of-rhs>
alteration = Node(
    "alteration",
    Seq(ident_or_term_or_groups,
        NotNeed("|"),
        TRef("rhs", grammar=cur_grammar, flat=True)),
    flat_eq_name=True,
    grammar=cur_grammar,
)

# rhs: ordered choice -- longer alternatives first so a bare item matches last
rhs = Node(
    "rhs",
    Or(concatenation, alteration, groups, ident_or_term),
    grammar=cur_grammar,
)

# rule: lhs = rhs ;   ("=" and ";" are consumed but not kept in the tree)
rule = Node(
    "rule",
    Seq(lhs, NotNeed("="), rhs, NotNeed(";")),
    grammar=cur_grammar,
)

# grammar: zero or more rules
grammar = Node("ebnf_grammar", ZeroOrMore(rule), grammar=cur_grammar)

if __name__ == '__main__':
    # Whitespace: one or more spaces/tabs/newlines, skipped by the reader.
    space = Node("space", Concat(OneOrMore(Or(" ", "\t", "\n"))), skip=True)
    # Comments: (* ... *), skipped; body is anything not containing "*)".
    comment = Node(
        "comment",
        Seq("(", "*",
            Node("text", Concat(ZeroOrMore(Not(Seq("*", ")"))))),
            "*", ")"),
        skip=True,
    )
    skip_pattern = OneOrMore(Or(space, comment))
    # FIX: close the input file deterministically instead of leaking the
    # handle from a bare open() call.
    with open("ebnf_test.txt") as src:
        tr = TextReader(src, skip_pattern=skip_pattern)
        # SetRecursionLimit(5000)
        rslt = grammar.read_from(tr)
    thing_pprint(rslt.readedlist)