Example #1
from collections import deque
from parse_input import parse_input
from ItemCollection import getItemCollection
from grammer_symbol import get_grammer_symbol
from cannonical_collection import get_cannonical_collection
from cannonical_collection import DFA
from follow import get_follow  # module name assumed from the helper's name
from analyse import ACTION, move_in, do_reduce  # assumed to sit alongside do_analyse in analyse.py


def gen_table(file_name):
	# Build the SLR(1) ACTION/GOTO table for the grammar read from file_name.
	SLR1_table = {}

	items_list,grammer_productions_list = getItemCollection(file_name)
	gram_symbol = get_grammer_symbol(grammer_productions_list)
	C_dict = get_cannonical_collection(items_list,grammer_productions_list,gram_symbol)
	follow = get_follow(file_name)
	
	# Title-cased symbols (e.g. 'E') are treated as non-terminals, everything
	# else as terminals; '#' is appended as the end-of-input marker.
	non_terminals_list = []
	terminals_list = []
	for each_symbol in gram_symbol:
		if each_symbol.istitle():
			non_terminals_list.append(each_symbol)
		else:
			terminals_list.append(each_symbol)
	terminals_list.append('#')


	# Each state I_k in the canonical collection becomes row k of the table.
	for each_statu in C_dict.keys():
		each_statu_for_SLR1_table_key = int(each_statu.split('_')[1])
		SLR1_table[each_statu_for_SLR1_table_key] = {}
		for each_item in C_dict[each_statu]:

			# '`' marks the dot in an item; if the item does not end with '`',
			# take the single symbol right after the dot and follow the DFA edge
			# labelled with it: shift 'S'+state on terminals, bare GOTO state
			# number on non-terminals.
			if not each_item.endswith('`'):
				char_behind = each_item[each_item.index('`')+1]
				for each_goto in DFA[each_statu]:
					edge_end = each_goto.strip('--')
					if edge_end.startswith(char_behind):
						end = edge_end.split('->I_')[1]
						if char_behind in terminals_list:
							SLR1_table[each_statu_for_SLR1_table_key][char_behind] = 'S'+end
						else:
							if char_behind not in SLR1_table[each_statu_for_SLR1_table_key].keys():
								SLR1_table[each_statu_for_SLR1_table_key][char_behind] = end

			# Item with the dot at the very end: the production is complete, so
			# fill reduce action 'r'+k (k = index of the production in
			# grammer_productions_list) on every symbol in FOLLOW(left side).
			elif each_item.endswith('`') and each_item.split('->')[0] != non_terminals_list[0]:
				left_part = each_item.split('->')[0]
				if each_item.strip('`') != grammer_productions_list[0]:
					for each_a in follow[left_part]:
						SLR1_table[each_statu_for_SLR1_table_key][each_a] = 'r'+str(grammer_productions_list.index(each_item.strip('`')))

			# The completed augmented start production accepts on the end marker '#'.
			else:
				SLR1_table[each_statu_for_SLR1_table_key]['#'] = 'acc'

	return SLR1_table
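
# Shape of the returned table (entries below are hypothetical, for illustration
# only): a dict keyed by state number, mapping each grammar symbol to 'S<n>'
# (shift to state n), 'r<k>' (reduce by production k), a bare state number
# (GOTO on a non-terminal), or 'acc' (accept on the end marker '#'), e.g.
# {0: {'i': 'S5', '(': 'S4', 'E': '1', 'T': '2'}, 1: {'+': 'S6', '#': 'acc'}}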


def do_analyse(SLR1_tabel, input_deque, grammer_productions_list):
    # Shift/reduce driver. Only the action dispatch below comes from the
    # original fragment; the stack initialisation and loop wrapper are a
    # reconstruction (assumed) so the function runs end to end.
    status_deque = deque([0])      # state stack, start state 0 (assumed)
    symbol_deque = deque(['#'])    # symbol stack, bottom marker '#' (assumed)
    analyse_result_array = []
    accpet = False
    while not accpet:
        # Record the current configuration, then look up the next action.
        analyse_result_array.append([list(status_deque), list(symbol_deque), list(input_deque)])
        next_action = ACTION(status_deque[-1], input_deque[0], SLR1_tabel)
        if next_action == 'acc':
            accpet = True
            analyse_result_array[-1].append('accpet')
            return next_action, analyse_result_array
        if next_action == 'error':
            return next_action, 0
        if next_action.startswith('S'):
            move_in(status_deque, symbol_deque, input_deque, next_action)
            analyse_result_array[-1].append('move_in')
        elif next_action.startswith('r'):
            destination = do_reduce(status_deque, symbol_deque, input_deque,
                                    next_action, SLR1_tabel,
                                    grammer_productions_list)
            analyse_result_array[-1].append(
                'reduce:' +
                grammer_productions_list[int(next_action.strip('r'))])
            if destination == 'error':
                return 'error', 0
    return '?'


if __name__ == '__main__':
    inputed_parse = parse_input()
    SLR1_tabel = gen_table('grammer.txt')
    items_list, grammer_productions_list = getItemCollection("grammer.txt")

    result, analyse_result_array = do_analyse(SLR1_tabel, inputed_parse,
                                              grammer_productions_list)
    print analyse_result_array
    print result
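
The example above pulls ACTION, move_in and do_reduce from analyse.py, which is not shown here. As a minimal sketch (not the project's actual helper), a table lookup that matches how do_analyse uses ACTION could look like this; returning the string 'error' for an empty cell is exactly what the loop above checks for:

def lookup_action(state, symbol, slr1_table):
    # slr1_table is the dict built by gen_table: state number -> {symbol: entry}.
    # A missing cell means the input cannot be accepted from this configuration.
    return slr1_table.get(state, {}).get(symbol, 'error')
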
Example #3
from parse_input import parse_input
from gen_table import gen_table
from collections import deque
from grammer_symbol import get_grammer_symbol
from ItemCollection import getItemCollection
from analyse import do_analyse
from cannonical_collection import get_cannonical_collection
from cannonical_collection import DFA
from pylsy import pylsytable

if __name__ == '__main__':
	file_name = raw_input('input the file name that contains the grammar (e.g. grammer.txt):>').strip()
	# file_name = 'grammer.txt'
	SLR1_tabel = gen_table(file_name)
	items_list,grammer_productions_list = getItemCollection(file_name)
	print 'augmented grammar is:\nG\':'
	for each in grammer_productions_list:
		print each
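	# For a small hypothetical grammar, the loop above would print the
	# production list, with index 0 being the augmented start production
	# (those indices are what the r<k> reduce entries refer to), e.g.:
	#   Z->E
	#   E->E+T
	#   E->T
	#   T->i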

	
	non_terminals_list = []
	terminals_list = []
	grammer_symbol = get_grammer_symbol(grammer_productions_list)
	C_dict = get_cannonical_collection(items_list,grammer_productions_list,grammer_symbol)
	for each_symbol in grammer_symbol:
		if each_symbol.istitle():
			non_terminals_list.append(each_symbol)
		else:
			terminals_list.append(each_symbol)
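
Both main scripts split the grammar symbols with str.istitle(); a quick check in the interpreter (the symbols here are hypothetical) shows how that classifies single-character symbols:

>>> symbols = ['E', 'T', 'F', '+', '*', '(', ')', 'i']
>>> [s for s in symbols if s.istitle()]       # becomes non_terminals_list
['E', 'T', 'F']
>>> [s for s in symbols if not s.istitle()]   # becomes terminals_list
['+', '*', '(', ')', 'i']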

Example #5
from parse_input import parse_input
from gen_table import gen_table
from collections import deque
from grammer_symbol import get_grammer_symbol
from ItemCollection import getItemCollection
from analyse import do_analyse
from cannonical_collection import get_cannonical_collection
from cannonical_collection import DFA
from pylsy import pylsytable

if __name__ == '__main__':
    file_name = raw_input(
        'input the file name that contains the grammar (e.g. grammer.txt):>').strip()
    # file_name = 'grammer.txt'
    SLR1_tabel = gen_table(file_name)
    items_list, grammer_productions_list = getItemCollection(file_name)
    print 'augmented grammar is:\nG\':'
    for each in grammer_productions_list:
        print each

    non_terminals_list = []
    terminals_list = []
    grammer_symbol = get_grammer_symbol(grammer_productions_list)
    C_dict = get_cannonical_collection(items_list, grammer_productions_list,
                                       grammer_symbol)
    for each_symbol in grammer_symbol:
        if each_symbol.istitle():
            non_terminals_list.append(each_symbol)
        else:
            terminals_list.append(each_symbol)
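
Every main script also imports pylsytable, presumably to pretty-print the finished table, but the snippets shown stop before that point. As a dependency-free sketch (column order and layout are assumptions, not the original script's output), the SLR1_tabel dict returned by gen_table could be dumped like this:

def print_slr1_table(slr1_table, terminals_list, non_terminals_list):
    # ACTION columns first (terminals plus the end marker '#'), then the
    # GOTO columns (non-terminals); empty cells are left blank.
    columns = terminals_list + ['#'] + non_terminals_list
    print 'state ' + ''.join('{0:>6}'.format(c) for c in columns)
    for state in sorted(slr1_table.keys()):
        row = slr1_table[state]
        print '{0:<6}'.format(state) + ''.join('{0:>6}'.format(row.get(c, '')) for c in columns)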