def _compute_token_trace(self, state_trace, visited):
    tokstream = []
    for state in state_trace:
        nid = state[0]
        if is_keyword(nid):
            # keyword: the token string is the node name minus its 4-char prefix
            tokstream.append([nid, self.langlet.get_node_name(nid)[4:]])
        elif is_token(nid):
            if nid == self.langlet.token.NAME:
                # NAME tokens cycle through the pool of available variable names
                name = self.Varnames[self._id % len(self.Varnames)]
                tokstream.append([nid, name])
                self._id += 1
            else:
                tokstream.append([nid, self.tokgen.gen_token_string(nid + SYMBOL_OFFSET)])
        else:
            # nonterminal: try to project it onto a token segment first
            seg = self.segtree[nid:self.langlet.token.NAME]
            if seg:
                S, P = proj_segment(seg)
                for t in P:
                    if t == self.langlet.token.NAME:
                        tokstream.append([t, self.langlet.get_node_name(S if S != 0 else nid)])
                    elif is_keyword(t):
                        tokstream.append([t, self.langlet.get_node_name(t)[4:]])
                    else:
                        tokstream.append([t, self.tokgen.gen_token_string(t + SYMBOL_OFFSET)])
            else:
                # no segment: fall back to memoized token traces of this
                # nonterminal, rotating through the cached alternatives
                nt_traces = self.token_traces.get(nid, [])
                if nt_traces:
                    idx = self._cnt % len(nt_traces)
                    self._cnt += 1
                    tokstream += nt_traces[idx]
                else:
                    if nid in visited:
                        # cycle: this nonterminal is already being expanded
                        return
                    visited.add(nid)
                    # expand the first state trace of nid that yields tokens;
                    # iterate over a copy since the hit is deleted from the original
                    for i, st in enumerate(self.state_traces[nid][:]):
                        tr = self._compute_token_trace(st, visited)
                        if tr:
                            tokstream += tr
                            del self.state_traces[nid][i]
                            self.token_traces.setdefault(nid, []).append(tr)
                            break
                    else:
                        # every state trace ran into a cycle
                        return
                    visited.remove(nid)
    return tokstream
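# A minimal, self-contained sketch of the recursion pattern used in
# _compute_token_trace above: expand a nonterminal depth-first, cut cycles
# with a `visited` set, and memoize finished expansions so later occurrences
# can reuse them. The toy grammar and all names below (RULES, expand) are
# hypothetical and not part of this module.

RULES = {
    "expr": [["term", "+", "term"], ["term"]],
    "term": [["NAME"], ["(", "expr", ")"]],
}

def expand(symbol, visited, memo):
    """Return a token list for symbol, or None if only cycles were found."""
    if symbol not in RULES:          # terminal: emit it directly
        return [symbol]
    if symbol in memo:               # reuse a memoized expansion
        return memo[symbol]
    if symbol in visited:            # cycle -- abort this branch
        return None
    visited.add(symbol)
    for alternative in RULES[symbol]:
        tokens = []
        for sym in alternative:
            sub = expand(sym, visited, memo)
            if sub is None:
                break                # this alternative runs into a cycle
            tokens += sub
        else:
            memo[symbol] = tokens    # first workable alternative wins
            visited.remove(symbol)
            return tokens
    visited.remove(symbol)
    return None

# expand("expr", set(), {}) -> ['NAME', '+', 'NAME']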
def _compute_embedd_tr(langlet, tr, r, nid, segtree, start_symbols):
    nfas = langlet.parse_nfa.nfas
    for X in nfas:
        if X in start_symbols:
            continue
        if r in langlet.parse_nfa.symbols_of[X]:
            # X is a nonterminal whose rule mentions r
            (start_X, _, _) = nfa_X = nfas[X]  # unpack NFA triple; start state unused here
            for _tr_X in compute_state_traces(nfa_X):
                for i, S in enumerate(_tr_X):
                    if S[0] == r:
                        # splice the trace of r into X's trace at r's position
                        tr_X = _tr_X[:i] + tr + _tr_X[i + 1:]
                        segment = segtree[nid:X]
                        if segment:
                            # wrap non-symbol entries as (item, 1, nid) states,
                            # then embed tr_X at X's position in the segment
                            sg = proj_segment(segment)[:]
                            for j, item in enumerate(sg):
                                if not is_symbol(item):
                                    sg[j] = (item, 1, nid)
                            i = sg.index(X)
                            return sg[:i] + tr_X + sg[i + 1:]
                        else:
                            # no segment from nid to X: embed one level further up
                            return _compute_embedd_tr(langlet, tr_X, X, nid, segtree, start_symbols)
    return []
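# A toy illustration of the splicing step in _compute_embedd_tr: the trace
# computed for rule r is embedded into a parent rule's trace by replacing the
# single entry that refers to r with the whole sub-trace (the
# _tr_X[:i] + tr + _tr_X[i + 1:] expression above). Plain lists stand in for
# the actual NFA state traces; the names here are illustrative only.

def splice(parent_trace, symbol, sub_trace):
    """Replace the first occurrence of symbol in parent_trace by sub_trace."""
    i = parent_trace.index(symbol)
    return parent_trace[:i] + sub_trace + parent_trace[i + 1:]

parent = ["if_stmt", "cond", "suite"]   # trace of a hypothetical parent rule
child = ["NAME", "==", "NUMBER"]        # trace computed for rule "cond"
assert splice(parent, "cond", child) == ["if_stmt", "NAME", "==", "NUMBER", "suite"]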