Example #1
0
def compute_langlet_expr(langlet, start_symbol=None):
    """Generate and unparse a random/derived expression for *langlet*.

    Builds per-NFA state traces, stitches them into one "super trace"
    starting at *start_symbol*, expands symbol entries via flat traces,
    then synthesizes a concrete token stream from the resulting token-id
    sequence and unparses it back to source text.

    NOTE(review): `start_symbols` (plural, used below) is not defined in
    this function — it is presumably a module-level global; confirm.
    NOTE(review): with the default `start_symbol=None` the lookup
    `langlet.parse_nfa.nfas[start_symbol]` would raise KeyError unless
    the NFA table has a None key — callers likely always pass a symbol.
    """
    running_cycle = set()
    state_traces = {}
    # Collect state traces for the chosen start symbol plus every
    # non-start symbol (i.e. skip the *other* start symbols).
    for s, nfa in langlet.parse_nfa.nfas.items():
        if s is start_symbol or s not in start_symbols:
            state_traces[s] = compute_state_traces(nfa)
    # NFA entry is a 3-tuple; only the start state is needed here.
    _, start, _ = langlet.parse_nfa.nfas[start_symbol]

    segtree = SegmentTree(langlet)
    segtree.create()
    supertrace = compute_super_tr(langlet, start, state_traces, segtree,
                                  running_cycle, start_symbols)
    flat_traces = compute_flat_tr(langlet)
    langlet_trace = []
    # Expand nonterminal symbols into their flat (token-level) traces;
    # terminals go into the trace directly.
    for t in supertrace:
        if is_symbol(t[0]):
            langlet_trace.extend(flat_traces[t[0]])
        else:
            langlet_trace.append(t[0])
    # for item in langlet_trace:
    #    print item, langlet.get_node_name(item)
    tgen = TokenGenerator(langlet, stdlen=1)
    tokstream = []
    letters = "abcdefg"
    i = 0
    # Turn the token-id trace into concrete (id, text) token pairs.
    for tid in langlet_trace:
        if tid == langlet.token.NAME:
            # Cycle through single-letter identifiers a..g.
            tokstream.append([tid, letters[i % len(letters)]])
            i += 1
        elif is_keyword(tid):
            # Node names appear to carry a 4-char prefix that is
            # stripped to recover the keyword text — TODO confirm.
            tokstream.append([tid, langlet.get_node_name(tid)[4:]])
        else:
            tokstream.append([tid, tgen.gen_token_string(tid + SYMBOL_OFFSET)])
    # 1000 is presumably the root node id expected by unparse — verify.
    return langlet.unparse([1000] + tokstream)
Example #2
0
 def __init__(self, langlet, start_symbol=None):
     """Set up trace/expression-generation state for *langlet*.

     Stores the langlet and start symbol, precomputes unused symbols
     and expression types, and prepares the segment tree and token
     generator used when synthesizing expressions.
     """
     self.langlet = langlet
     self.start_symbol = start_symbol
     # Per-symbol trace caches, filled on demand.
     self.state_traces = {}
     self.unused_symbols = set()
     self.compute_unused_symbols()
     # Build the segment tree before it is assigned, then publish it.
     segtree = SegmentTree(langlet)
     segtree.create()
     self.segtree = segtree
     self.token_traces = {}
     # Expression types always include the start symbol itself.
     self.expr_types = {self.start_symbol}
     self.compute_expr_types()
     self.tokgen = TokenGenerator(langlet, stdlen=1)
     # Internal counters and collected expressions.
     self._cnt, self._id = 1, 0
     self._expressions = []
Example #3
0
 def __init__(self, langlet, kind = "parse"):
     self.langlet   = langlet
     self.tokgen = TokenGenerator(langlet)