        if next_action == 'acc':
            # input accepted: record it and stop
            accept = True
            analyse_result_array[-1].append('accept')
            return next_action, analyse_result_array
        if next_action == 'error':
            return next_action, 0
        if next_action.startswith('S'):
            # shift: push the next input symbol and the new state
            move_in(status_deque, symbol_deque, input_deque, next_action)
            analyse_result_array[-1].append('move_in')
        elif next_action.startswith('r'):
            # reduce by the production whose number follows the 'r'
            destination = do_reduce(status_deque, symbol_deque, input_deque,
                                    next_action, SLR1_tabel,
                                    grammer_productions_list)
            analyse_result_array[-1].append(
                'reduce:' +
                grammer_productions_list[int(next_action.strip('r'))])
            if destination == 'error':
                return 'error', 0
    return '?'


if __name__ == '__main__':
    inputed_parse = parse_input()
    SLR1_tabel = gen_table('grammer.txt')
    items_list, grammer_productions_list = getItemCollection("grammer.txt")

    result, analyse_result_array = do_analyse(SLR1_tabel, inputed_parse,
                                              grammer_productions_list)
    print analyse_result_array
    print result
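
The fragment above is the shift/reduce core of the project's do_analyse driver. Below is a minimal, self-contained sketch of the same table-driven loop; it is not taken from this project. The toy grammar, the hand-built ACTION/GOTO dictionaries, and every name in it are illustrative assumptions standing in for the SLR(1) table that gen_table produces.

# Toy grammar:  0: S' -> S   1: S -> ( S )   2: S -> x
from collections import deque

PRODUCTIONS = [("S'", 1), ("S", 3), ("S", 1)]   # (left side, length of right side)

# Hand-built LR table for the toy grammar (assumed, not generated by gen_table).
ACTION = {
    (0, '('): 'S2', (0, 'x'): 'S3',
    (1, '$'): 'acc',
    (2, '('): 'S2', (2, 'x'): 'S3',
    (3, ')'): 'r2', (3, '$'): 'r2',
    (4, ')'): 'S5',
    (5, ')'): 'r1', (5, '$'): 'r1',
}
GOTO = {(0, 'S'): 1, (2, 'S'): 4}

def analyse(tokens):
    status_deque = deque([0])            # state stack
    symbol_deque = deque(['$'])          # symbol stack
    input_deque = deque(tokens + ['$'])  # remaining input, end-marked with '$'
    trace = []
    while True:
        action = ACTION.get((status_deque[-1], input_deque[0]), 'error')
        trace.append((list(status_deque), list(symbol_deque),
                      list(input_deque), action))
        if action == 'acc':
            return 'acc', trace
        if action == 'error':
            return 'error', trace
        if action.startswith('S'):
            # shift: consume the token and push the new state
            symbol_deque.append(input_deque.popleft())
            status_deque.append(int(action[1:]))
        else:
            # reduce: pop the handle, push the left side, follow GOTO
            lhs, rhs_len = PRODUCTIONS[int(action[1:])]
            for _ in range(rhs_len):
                symbol_deque.pop()
                status_deque.pop()
            symbol_deque.append(lhs)
            status_deque.append(GOTO[(status_deque[-1], lhs)])

if __name__ == '__main__':
    result, trace = analyse(['(', '(', 'x', ')', ')'])
    for step in trace:
        print(step)
    print(result)    # 'acc'
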
Example #2
__author__ = 'SkywalkerAtlas'

from parse_input import parse_input
from gen_table import gen_table
from collections import deque
from grammer_symbol import get_grammer_symbol
from ItemCollection import getItemCollection
from analyse import do_analyse
from cannonical_collection import get_cannonical_collection
from cannonical_collection import DFA
from pylsy import pylsytable

if __name__ == '__main__':
	file_name = raw_input('input file name which contains grammar (eg: grammer.txt):>').strip()
	# file_name = 'grammer.txt'
	SLR1_tabel = gen_table(file_name)
	items_list,grammer_productions_list = getItemCollection(file_name)
	print 'augmented grammar is:\nG\':'
	for each in grammer_productions_list:
		print each

	
	non_terminals_list = []
	terminals_list = []
	grammer_symbol = get_grammer_symbol(grammer_productions_list)
	C_dict = get_cannonical_collection(items_list,grammer_productions_list,grammer_symbol)
	for each_symbol in grammer_symbol:
		if each_symbol.istitle():
			non_terminals_list.append(each_symbol)
		else:
			terminals_list.append(each_symbol)
Example #4
from parse_input import parse_input
from gen_table import gen_table
from collections import deque
from grammer_symbol import get_grammer_symbol
from ItemCollection import getItemCollection
from analyse import do_analyse
from cannonical_collection import get_cannonical_collection
from cannonical_collection import DFA
from pylsy import pylsytable

if __name__ == '__main__':
    file_name = raw_input(
        'input file name which contains grammar (eg: grammer.txt):>').strip()
    # file_name = 'grammer.txt'
    SLR1_tabel = gen_table(file_name)
    items_list, grammer_productions_list = getItemCollection(file_name)
    print 'augmented grammar is:\nG\':'
    for each in grammer_productions_list:
        print each

    non_terminals_list = []
    terminals_list = []
    grammer_symbol = get_grammer_symbol(grammer_productions_list)
    C_dict = get_cannonical_collection(items_list, grammer_productions_list,
                                       grammer_symbol)
    for each_symbol in grammer_symbol:
        if each_symbol.istitle():
            non_terminals_list.append(each_symbol)
        else:
            terminals_list.append(each_symbol)
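
A minimal sketch of the terminal / non-terminal split performed in the loop above: str.istitle() is True for title-cased symbols like 'E' or 'Expr', so those are collected as non-terminals, while operators and lower-case tokens are collected as terminals. The sample symbol list here is an illustrative assumption, not the project's grammar.

grammer_symbol = ['E', 'T', 'F', '+', '*', '(', ')', 'id']   # illustrative only
non_terminals_list = [s for s in grammer_symbol if s.istitle()]
terminals_list = [s for s in grammer_symbol if not s.istitle()]
print(non_terminals_list)    # ['E', 'T', 'F']
print(terminals_list)        # ['+', '*', '(', ')', 'id']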