Example #1
from lex import Lex      # assumed project-local modules
from parse import Parse

def main():
    for line in open('test.txt', 'r'):
        #line = "(a>b)||(c<d)&&(e!=f);\n"
        line = line.rstrip('\n')  # strip the trailing \n
        print("Original lex:{line}".format(**locals()))
        statement = line
        lex = Lex(statement)
        parse = Parse(lex)
Example #2
# requires: import sys; import ply.yacc as yacc
def __init__(self):
    lex = Lex()
    self.tokens = lex.tokens
    # operator precedence definitions
    self.precedence = (
        ('left', 'COMPARACAO', 'MAIOR_IGUAL', 'MAIOR', 'MENOR_IGUAL', 'MENOR'),
        ('left', 'MAIS', 'MENOS'),
        ('left', 'MULT', 'DIVIDE'),
    )
    arq = open(sys.argv[1], 'r', encoding='utf-8')
    data = arq.read()
    parser = yacc.yacc(debug=False, module=self, optimize=False)
    self.ps = parser.parse(data)
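
The precedence table follows PLY's convention: tokens in the same tuple share a level, and later tuples bind tighter. A minimal sketch of a grammar rule on the same class that PLY would disambiguate with this table (the rule and the AST shape are illustrative assumptions, not this project's code):

# hypothetical rule; PLY resolves expression MAIS expression vs.
# expression MULT expression using the self.precedence table above
def p_expression_binop(self, p):
    """expression : expression MAIS expression
                  | expression MENOS expression
                  | expression MULT expression
                  | expression DIVIDE expression"""
    p[0] = (p[2], p[1], p[3])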
Example #3
	# requires: import pickle (plus the project's Lex, Grammar and Automato classes)
	def inicia(self):
		case = 0
		lex = Lex()

		# 1. RE for the reserved words
		a1 = lex.lexer('reservado', case)
		estados = a1.getDictAutomato()
		case += len(estados)

		# 2. RE for the identifiers
		a2 = lex.lexer('identificadores', case)

		# 3. Grammar for the special symbols
		terminais = ['+', '-', '=', '/', '*', '>', '<', '!']
		nTerminais = ['S']
		producoes = {'S': ['+', '-', '=', '/', '*', '>', '<', '!']}
		inicial = 'S'
		g = Grammar(producoes, terminais, nTerminais, inicial)
		s, i, f = g.convertGtoAF()
		a3 = Automato(s, i, f)
		a3.determina()
		a3.printAtomato()
		print("\n")

		estados = a2.getDictAutomato()
		case += len(estados)
		a3 = lex.renameState(a3, case)

		# 4. Grammar for the separators
		terminais2 = [':', ';', ' ', '(', ')', '[', ']', ',', '\n']
		nTerminais2 = ['S']
		producoes2 = {'S': [':', ';', ' ', '(', ')', '[', ']', ',', '\n']}
		inicial2 = 'S'
		g = Grammar(producoes2, terminais2, nTerminais2, inicial2)
		s2, i2, f2 = g.convertGtoAF()
		a4 = Automato(s2, i2, f2)
		a4.determina()
		a4.printAtomato()
		print("\n")

		estados = a3.getDictAutomato()
		case += len(estados)
		a4 = lex.renameState(a4, case)

		# 5. RE for the constants
		estados = a4.getDictAutomato()
		case += len(estados)
		a5 = lex.lexer('constantes', case)

		# union of the five automata, then determinize and persist
		r = a1.oU([a2, a3, a4, a5])
		print("\n")
		r.determina()
		r.printAtomato()

		with open('automato.pkl', 'wb') as output:
			pickle.dump(r, output, pickle.HIGHEST_PROTOCOL)
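
Since the combined automaton is pickled at the end, a later run can restore it instead of rebuilding; a minimal sketch using only the standard library (the file name matches the dump above):

import pickle

# reload the determinized automaton written by inicia()
with open('automato.pkl', 'rb') as entrada:
    r = pickle.load(entrada)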
Example #4
import os
import sys
from lex import Lex
from cminus import cminus
# main function
fileName = sys.argv[1]
lexer = Lex()
if lexer.fileFound(fileName):
	lexer.removeComments(fileName)
	serializedTokens = lexer.getTokens()
	tokens = lexer.tokenAnalyzer(serializedTokens)
	#os.remove("temp.l")
	#os.remove("tempTokens")
	parser = cminus(tokens)
	#parser.nextToken()
	parser.Program()
	parser.printQuad()
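
A hypothetical invocation of this driver, since it reads the source file from sys.argv[1] (script and file names are placeholders):

python main.py program.cm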
Example #5
	def __init__(self):
		lex = Lex()
		self.a1 = lex.lexer('reservados')
		self.a2 = lex.lexer('identicadores')
Example #6
import os
import RPi.GPIO as GPIO
from lex import Lex
from subprocess import run
import hotword

basedir = os.path.dirname(os.path.abspath(__file__))
pin = 18
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(pin, GPIO.OUT)

lex = Lex()
stop_recording = False


def record_and_post():
    global stop_recording  # the assignment below would otherwise create a local

    GPIO.output(pin, GPIO.HIGH)
    response = lex.post_content()
    GPIO.output(pin, GPIO.LOW)
    state = lex.play_response(response)

    print('State:', state)

    if state == 'Fulfilled':
        detect()
    elif state != 'Failed':
        record_and_post()
    else:
        stop_recording = True
        detect()
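
The snippet configures the LED pin but never releases it; a minimal guarded entry point (a sketch, assuming detect() is the hotword loop defined elsewhere in this script):

if __name__ == '__main__':
    try:
        detect()
    finally:
        GPIO.cleanup(pin)  # release only the channel this script configured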
Example #7
def inicia(args):
    texto = entrytext.get()  # read the expression from the Tk entry widget
    bd = AutomatoBD()
    bd.inicia()
    lex = Lex()
    lex.identificaToken(texto)
Example #8
def compila(input):
    # the parameter is ignored; the text always comes from the Tk entry widget
    texto = entrytext.get()
    lex = Lex()
    lex.identificaToken(texto)
Example #9
from parse import Parser_LL1
from lex import Lex
for line in open('test.txt', 'r'):
    # strip the trailing \n
    statement = line.rstrip('\n')
    print("Original lex:{statement}".format(**locals()))
    statement += '%'  # append the special end-of-input symbol
    lex = Lex(statement)
    print("Parsed lex:", end="")
    parser = Parser_LL1(lex)
Example #10
__author__ = 'lucasmpalma and luizu'
from lex import Lex
from automatoBD import AutomatoBD
import Tkinter as Tk
import ttk 

lex = Lex()
lex.identificaToken('algoritmos/meuPrograma.txt')
# a = AutomatoBD()
# a.inicia()
Example #11
from lex import Lex
import os

lex_un = Lex()


# Strips the stray leading caret that sometimes prefixes the argument
def process_in(in_file):
    if in_file.startswith("^"):
        return in_file[1:]
    return in_file


# Creates the file name of the output file
# Outputs are in Java
def gen_name(in_file):
    base = os.path.basename(in_file)
    ind = base.index(".blsp")
    name = base[0:1].capitalize() + base[1:ind] + ".java"
    return name
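
A quick sanity check of the two helpers above (the file name is hypothetical):

assert process_in("^hello.blsp") == "hello.blsp"
assert gen_name("hello.blsp") == "Hello.java"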


# High-complexity interpreter, someone help
def parse_file(in_file):
    file_in = process_in(in_file)
    if not is_bsp(file_in):
        return 1

    genned_name = gen_name(file_in)
    with open(file_in, 'r') as file:
Example #12
from lex import Lex
from parser import Parser

# read the input mathematical expression from the command line
# (renamed from `input` to avoid shadowing the builtin being called)
expression = input("Enter the equation to calculate:")

# pass the expression to the lexer to convert it into tokens
lexer = Lex().get_lexer()
tokens = lexer.lex(expression)

# build the parser, then parse the token stream and evaluate it
parseGen = Parser()
parseGen.parse()
parser = parseGen.get_parser()
parser.parse(tokens).eval()
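
The get_lexer()/lex()/get_parser() calling pattern matches the rply library; a minimal sketch of what such a Lex wrapper could look like (the token set and class layout are assumptions, not this project's code):

from rply import LexerGenerator

class Lex:
    def __init__(self):
        self.lg = LexerGenerator()

    def get_lexer(self):
        # token-name / regex pairs; whitespace is skipped entirely
        self.lg.add('NUMBER', r'\d+')
        self.lg.add('PLUS', r'\+')
        self.lg.add('MINUS', r'-')
        self.lg.ignore(r'\s+')
        return self.lg.build()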
Example #13
    class ParseHandler(object):

        def __init__(self):
            self.ast = None
            self.exps = []

        def shift(self, token):
            print(colorful('shift {0} {1}'.format(token[0], token[1]), "Cyan"))

        def reduce(self, grammar):
            """
            规约动作, grammar为规约用的产身世
            """
            print('利用规则' + colorful('%s -> %s' % (grammar[0], ' '.join(grammar[1])), "Green"))

    # from util import colorful
    lex = Lex()
    lex.keyword = ['lambda', '[', ']', 'let', 'define', 'if', 'cond', 'or', 'and', '(', ')']
    lex.read_lex('regular_lex.txt')
    lex.compile(grammar_type="regular")
    print(colorful('Lexer compilation finished...', 'Yellow'))

    parser = LRParser()
    parser.read_grammar('schepy_grammar.txt')
    parser.compile()
    print(colorful('Grammar compilation finished...', 'Yellow'))
    parser.show_dfa()

    while True:
        parser.parse(lex.lex(raw_input(), ignore=["limit"]), ParseHandler())
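
A sketch of a handler that builds a tree instead of printing actions, assuming LRParser drives shift/reduce exactly as above (the class is hypothetical, not part of the project):

    class TreeHandler(object):

        def __init__(self):
            self.stack = []

        def shift(self, token):
            # push the shifted terminal's value
            self.stack.append(token[1])

        def reduce(self, grammar):
            # pop one value per symbol in the production body and
            # wrap them in a node labeled with the left-hand side
            body_len = len(grammar[1])
            children = self.stack[-body_len:] if body_len else []
            del self.stack[len(self.stack) - body_len:]
            self.stack.append((grammar[0], children))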
Example #14
def sintetic(entrada):
    lex = Lex()
    lex.identificaToken(entrada)
    arquivo = open('../testes/tokens.txt', 'r')  # avoid shadowing the builtin 'file'
    actual = ''
    pilha = ['$', '<S>']  # parse stack: end marker plus the start symbol
    gambs = [';', ',']
    # terminais, nTerminais and arr are the module-level LL(1) parsing tables
    for line in arquivo:
        for item in line:
            # if item != '(' and item != ')':
            actual += item
            if item == '\n':
                # normalize the token line
                if 'PR' in actual:
                    actual = actual.replace(", PR)\n", '')
                    actual = actual.replace("(", '')
                elif ' , SP)\n' in actual or actual == '(\\n, SP)\n':
                    actual = ''
                    break
                elif 'SP' in actual:
                    actual = actual[1]
                elif 'PR' not in actual and 'SP' not in actual:
                    actual = actual[-4] + actual[-3]
                # table-driven LL(1) loop
                while actual != pilha[-1]:

                    if actual == pilha[-1]:
                        pilha.pop()
                    else:
                        elemento = pilha[-1]
                        try:
                            j = terminais[actual]
                            i = nTerminais[elemento]
                        except KeyError:
                            return "ERRO SINTATICO"
                        else:
                            producao = arr[i][j]
                            if producao == "&":
                                pilha.pop()
                            else:
                                if producao == 'E':
                                    return "ERRO SINTATICO"
                                else:
                                    tamanho = len(producao)
                                    pilha.pop()
                                    concatena = ''
                                    for num in range(tamanho-1, -1, -1):
                                        if producao[num] not in gambs:
                                            concatena += producao[num]
                                        if producao[num] in gambs:
                                            pilha.append(producao[num])
                                            concatena = ''
                                        if producao[num] == ' ' or num == 0:
                                            concatena = concatena[::-1]
                                            if ' ' in concatena:
                                                concatena = concatena.replace(' ', '')
                                            if """ " """ not in concatena and concatena != '':
                                                pilha.append(concatena)
                                                concatena = ''
                pilha.pop()
                actual = ''
    if pilha[-1] == '$':
        return "SINTATICAMENTE CORRETO"
    else:
        return "ERRO SINTATICO"
Example #15
import readline
if 'libedit' in readline.__doc__:
    readline.parse_and_bind("bind ^I rl_complete")
else:
    readline.parse_and_bind("tab: complete")

import crash_on_ipy

crash_on_ipy.init()

from lex import Lex
from parser import LRParser
from util import colorful
from runtime import ParseHandler, Env

LEX = Lex()
LEX.keyword = ['lambda', '[', ']', 'let', 'define', 'if',
               'cond', 'or', 'and', '(', ')', '$T', '$F']
LEX.read_lex('regular_lex.txt')
LEX.compile(grammar_type="regular")
# lex.read_lex('regex_lex.txt')
# lex.compile()
print(colorful('Lexer compilation finished...', 'Yellow'))
PARSER = LRParser()
PARSER.read_grammar('schepy_grammar.txt')
PARSER.compile()
print(colorful('Grammar compilation finished...', 'Yellow'))
GLOBAL_ENV = Env.std_env()
while True:
    try:
        HANDLER = ParseHandler()
Example #16
# Reserved words: INTEIRO, TEXTO, ARRANJO, INICIO:, ENQUANTO, CONCLUIDO,
# SE:, SENAO:, LEIA, MOSTRA, E, OU, NAO, FINAL

from lex import Lex  # assumed project-local import, as in the other examples

lex = Lex()
reservadas = lex.lexer()
print(reservadas.aceita('MOLEIA '))
# reservadas.printAtomato()
# reservadas.organizaAutomato()
# reservadas.determina()
# reservadas.montaAutomato()
# reservadas.min()
# reservadas.printAtomato()
# print(reservadas.getFinais())
# print(reservadas.aceita('LEIA'))
# reservadas.printAtomato()

# er = Er('((((((a|b)|c)|d)|e)|g)*)')
# automato = er.erToAutomato()
# automato.determina()