Example #1
0
def get_token_string(langlet, nid):
    """Return the human-readable string for token id *nid*.

    Resolution order:
      1. keyword table (when *nid* is a keyword id),
      2. the lexer NFA's constants table (keyed by offset token id),
      3. fall back to the langlet's generic node-name lookup.
    """
    if is_keyword(nid):
        return langlet.keywords[nid]
    constants = langlet.lex_nfa.constants
    if nid + SYMBOL_OFFSET in constants:
        # BUG FIX: the original tested membership with nid+SYMBOL_OFFSET but
        # then indexed with the bare nid, so a passing guard could still raise
        # KeyError (or return the wrong entry). Use the same key for both.
        # NOTE(review): if constants is actually keyed by bare nid, the guard
        # (not the lookup) was the wrong side — confirm against its builder.
        return constants[nid + SYMBOL_OFFSET]
    return langlet.get_node_name(nid)
Example #2
0
 def __init__(self, langlet, nfa):
     """Index the rule's NFA: collect keyword strings, non-start node
     names, the rule name, and the total transition-target count K.
     """
     super(ReverseGen, self).__init__(ls_grammar)
     self.rule_langlet = langlet
     self.nfa = nfa
     self.strings = []
     self.names = []
     start, transitions = nfa[1], nfa[2]
     self.rule_name = langlet.get_node_name(start[0])
     for label in transitions:
         if is_keyword(label[0]):
             self.strings.append(langlet.keywords[label[0]])
         elif label != start:
             # skip the start label itself; everything else is a named node
             self.names.append(langlet.get_node_name(label[0]))
     # K = total number of transition targets across all states
     self.K = sum(len(targets) for targets in self.nfa[2].values())