Example #1
def get_parse_table(g, item_sets, raw_goto_dict):
    action_dict = {}
    goto_dict = {}
    # Shift actions: for every item with a terminal after the dot,
    # shift to the state reached through the raw goto edge.
    for itmset in item_sets:
        for exp in itmset.item_set:
            token = exp.get_token_after_dot()
            if not tokens.is_terminal(token):
                continue
            to_state = raw_goto_dict.get(itmset, {}).get(token)
            if to_state is None:
                continue
            action_dict.setdefault(itmset, {}).setdefault(
                token, set()).add((ACTION_SHIFT, to_state))

    # Reduce/accept actions: for every completed item (dot at the end),
    # reduce on each lookahead token; the start symbol accepts instead.
    for itmset in item_sets:
        for exp in itmset.item_set:
            if not exp.is_pending_reduce():
                continue
            left = exp.left_token
            if left != g.start_token:
                # TODO(kdy): the grammar is expanded before the LR(1) item
                # sets are built, and lookahead (acc) tokens are not merged
                # when building closures, so this loop sees only one token;
                # possibly a bad design?
                for acc in exp.acc_tokens:
                    action_dict.setdefault(itmset, {}).setdefault(
                        acc, set()).add((ACTION_REDUCE, exp))
            else:
                lst = list(exp.acc_tokens)
                assert len(lst) == 1 and tokens.is_acc(lst[0])
                action_dict.setdefault(itmset, {}).setdefault(
                    lst[0], set()).add((ACTION_ACC, ))

    # Goto table: keep only the non-terminal edges of the raw goto graph.
    for from_set, edges in raw_goto_dict.items():
        for token, to_set in edges.items():
            if tokens.is_terminal(token):
                continue
            goto_dict.setdefault(from_set, {})[token] = to_set
    return action_dict, goto_dict
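
The tables built above feed a standard LR parse loop. A minimal driver sketch, not part of the original code: it assumes a conflict-free grammar (each action set holds exactly one entry) and an input token stream that ends with the acc/end marker.

def parse(start_state, input_tokens, action_dict, goto_dict):
    # Hypothetical LR driver; only the ACTION_* constants, get_right_tokens,
    # and left_token come from the examples above.
    state_stack = [start_state]
    pos = 0
    while True:
        token = input_tokens[pos]
        actions = action_dict[state_stack[-1]][token]
        assert len(actions) == 1, "shift/reduce or reduce/reduce conflict"
        action = next(iter(actions))
        if action[0] == ACTION_SHIFT:
            state_stack.append(action[1])
            pos += 1
        elif action[0] == ACTION_REDUCE:
            exp = action[1]
            # Pop one state per right-hand-side symbol; epsilon productions
            # would need special handling in a full driver.
            for _ in exp.get_right_tokens():
                state_stack.pop()
            # Follow the goto edge for the reduced non-terminal.
            state_stack.append(goto_dict[state_stack[-1]][exp.left_token])
        else:  # (ACTION_ACC, )
            return True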
Example #2
def get_first_set(gram, t):
    # FIRST(t) for a single token: a terminal (or epsilon) is its own
    # FIRST set; for a non-terminal, union FIRST over every alternative
    # of every production with t on the left-hand side.
    ret = set()
    g = gram.normalized_mode
    if tokens.is_terminal(t) or tokens.is_epsilon(t):
        ret.add(t)
        return ret
    for exp in g.get_expresses_by_left(t):
        for _tokens in exp.right_tokens_list:
            ret |= get_first_set_multi(gram, _tokens)
    return ret
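
get_first_set_multi is called above but not defined in these examples. A plausible sketch under the usual FIRST-of-a-sequence rule (accumulate the non-epsilon members of each symbol's FIRST set until a non-nullable symbol is hit), reusing tokens.is_epsilon and fact.create_epsilon from the example:

def get_first_set_multi(gram, token_seq):
    # Plausible helper sketch, not the original implementation. Like
    # get_first_set itself, it assumes no problematic left recursion.
    ret = set()
    eps_token = fact.create_epsilon()
    for t in token_seq:
        first = get_first_set(gram, t)
        ret |= {x for x in first if not tokens.is_epsilon(x)}
        if not any(tokens.is_epsilon(x) for x in first):
            # This symbol is not nullable, so later symbols cannot
            # contribute to FIRST of the sequence.
            return ret
    # Every symbol was nullable: the whole sequence derives epsilon.
    ret.add(eps_token)
    return ret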
Example #3
    def __init__(self, left_token, right_tokens, dot_pos, acc_tokens,
                 piority=None):
        super(LR1_Express, self).__init__(left_token, [right_tokens])
        self.dot_pos = dot_pos
        self.acc_tokens = set(acc_tokens)
        if piority is not None:
            self.piority = piority
        else:
            # Default priority: inherit from the rightmost terminal on the
            # right-hand side (the usual yacc-style rule precedence), or 0
            # if the production contains no terminal.
            self.piority = 0
            for token in reversed(self.get_right_tokens()):
                if tokens.is_terminal(token):
                    self.piority = token.piority
                    break
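
Example #1 also calls get_token_after_dot and is_pending_reduce on these items, and neither is shown. Plausible sketches as LR1_Express methods, consistent with how dot_pos is used above (assuming tokens.is_terminal tolerates None for completed items):

    def get_token_after_dot(self):
        # Hypothetical sketch: the symbol immediately right of the dot,
        # or None for a completed item.
        right_tokens = self.get_right_tokens()
        if self.dot_pos >= len(right_tokens):
            return None
        return right_tokens[self.dot_pos]

    def is_pending_reduce(self):
        # Hypothetical sketch: the item is ready to reduce once the dot
        # has moved past the last right-hand-side token.
        return self.dot_pos >= len(self.get_right_tokens())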