Example no. 1
def PPrint(self, level=0, pref="", post="", end="\n", first_indent=True):
    # Render the node inline as "ClassName(<thing>)" at the given
    # indentation level; pref/post wrap the output, end closes it.
    space = gen_indentation(level)
    if first_indent:
        print(space, end="")
    print(pref + self.__class__.__name__ + "(", end="")
    # Delegate the wrapped value to the generic pretty-printer, inline.
    thing_pprint(self.thing, level=level + 1, end="", first_indent=False)
    print(")" + post, end=end)
Example no. 2
def PPrint(self, level=0, pref="", post="", end="\n", first_indent=True):
    # Print the node header "ClassName('name'" on its own line, then the
    # subnodes one level deeper (leading-comma style), then the closing ")".
    space = gen_indentation(level)
    if first_indent:
        print(space, end="")
    print(pref + self.__class__.__name__ + "(" + repr(self.name))
    thing_pprint(self.subnodes, level + 1, ", ", end="")
    # pprint_node_list(nlist=self.subnodes, level=level+1, pref="")
    print(")" + post, end=end)
Example no. 3
tok_opencurlybr  = string_to_tok_by_type("{", tt_list)
tok_closecurlybr = Token("}", tt_terminator)

tok_opensqbr  = Token("[", tt_terminator)
tok_closesqbr = Token("]", tt_terminator)

tok_comma     = Token(",", tt_terminator)
tok_period    = Token(".", tt_terminator)
tok_colon     = Token(":", tt_terminator)
tok_semicolon = Token(";", tt_terminator)
tok_squote    = Token("'", tt_terminator)
tok_dquote    = Token('"', tt_terminator)
tok_accent    = Token("`", tt_terminator)
#tok_eof       = Token("",  tt_eof)


token_iterator = TokenIterator(TextReader(open("ebnf_test.txt"))
                               , tt_list, tt_skip_list)
token_reader = TokenReader(token_iterator)


if __name__ == '__main__':
    for tok in token_iterator:
        thing_pprint(tok)
    # rslt = token_reader.read_next()
    # tok = rslt.readedlist[0]
    # while tok:
    #     tok.PPrint()
    #     rslt = token_reader.read_next()
    #     tok = rslt.readedlist[0]
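
Token, TokenIterator, TokenReader, TextReader, and the tt_* token types come from earlier parts of the project and are not repeated here. For reference, a minimal Token compatible with the constructor calls above might look like this (the field names are assumptions, and it reuses the gen_indentation sketch from above):

# Hypothetical minimal Token: just the (text, type) pair implied by the
# constructor calls above, with a PPrint in the style of Examples 1 and 2.
class Token:
    def __init__(self, text, toktype):
        self.text = text
        self.toktype = toktype

    def PPrint(self, level=0, pref="", post="", end="\n", first_indent=True):
        if first_indent:
            print(gen_indentation(level), end="")
        print(pref + "Token(" + repr(self.text) + ")" + post, end=end)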
Example no. 4
# EBNF concatenation: item "," rhs. TRef forward-references pn_rhs, which
# is defined below; NotNeed matches a token without keeping it in the result.
pn_concatenation = Node("pn_concatenation"
                        , Seq(pn_ident_or_term_or_pn_groups, NotNeed(","), TRef("pn_rhs", grammar=cur_grammar, flat=True))
                        , flat_eq_name=True, grammar=cur_grammar)

pn_alteration = Node("pn_alteration"
                     , Seq(pn_ident_or_term_or_pn_groups, NotNeed("|"), TRef("pn_rhs", grammar=cur_grammar, flat=True))
                     , flat_eq_name=True, grammar=cur_grammar)

pn_rhs = Node("pn_rhs", Or(pn_concatenation
                           , pn_alteration
                           , pn_groups
                           , pn_ident_or_term)
              , grammar=cur_grammar)

# A rule is "lhs = rhs ;"; the whole grammar is zero or more rules.
pn_rule = Node("pn_rule", Seq(pn_lhs, NotNeed("="), pn_rhs, NotNeed(";")), grammar=cur_grammar)
pn_grammar = Node("ebnf_grammar", ZeroOrMore(pn_rule), grammar=cur_grammar)


#token_iterator = TokenIterator(TextReader(open("test1.txt"))
#                               , tt_list, tt_skip_list)
#token_reader = TokenReader(token_iterator)
#token_reader = TokenReader(TokenIterator(TextReader(open("test1.txt")), tt_list, tt_skip_list))


if __name__ == '__main__':
    # SetRecursionLimit(5000)
    # lexing the input into tokens first keeps the recursion depth low
    rslt = pn_grammar.read_from(token_reader)
    thing_pprint(rslt.readedlist)
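
For context, the grammar above describes classic EBNF, so an input such as ebnf_test.txt could contain rules like the following hypothetical sample ("|" for alternation, "," for concatenation, and "{ }" for repetition, the last assuming pn_groups covers EBNF's bracketed groups):

digit  = "0" | "1" | "2" ;
number = digit , { digit } ;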