Example #1
def main(argv):
    # The with statement allows objects like files to be used in a way
    # that ensures they are always cleaned up promptly and correctly.
    with open(argv[1], 'r') as f:
        a = Grammar()
        a.readgr(f)
    # print("\nSpecial states:"); print(a.asterisk)
    # print("\nSpecial states children:"); print(a.ignore)
    # print("\nGrammar:"); a.printgr()

    b = NDFA().builtWith(a)
    # print("\nNDFA:"); b.printndfa()
    # print("\nNDFA final states: {}\n".format(b.finals))
    # print(b.to_csv())

    c = b.to_dfa()
    # print("\nDFA:"); c.printdfa()
    # print("\nDFA final states: {}\n".format(c.finals))
    # print(c.to_csv())

    p = Parser(c)
    if len(argv) > 2:  # argv[2] would raise IndexError if the argument is absent
        with open(argv[2], 'r') as f:
            p.parse(f)
        print("\nSymbol table: {}\n".format(p.table))
    else:
        print("No source code input")
Example #2
File: test.py Project: gpuweb/gpuweb
 def test_Equal_Lookahead(self):
     i0c = self.is_C_0(la=Grammar.LookaheadSet({self.c}))
     i0d = self.is_C_0(la=Grammar.LookaheadSet({self.d}))
     self.assertEqual(i0c, i0c)
     self.assertEqual(i0d, i0d)
     self.assertFalse(i0c == i0d)
     self.assertFalse(i0d == i0c)
Example #3
def main():
    #DATA FOR MY TEST GRAMMAR'S PRODUCTION LIST
    productionsTest = ['S>zMNz', 'M>aMa', 'N>bNb', 'N>z', 'M>z']
    #DATA FOR MY TEST GRAMMAR
    nonTerminalsTest = ["S", "M", "N", "T"]
    terminalsTest = ["a", "b", "z"]
    initialTest = "S"
    productionsTest = makeProductionList(productionsTest)
    saveProductions(productionsTest)
    productionSaved = OP  #save the productions for my test grammar
    grammarTest = Grammar("Test", nonTerminalsTest, terminalsTest,
                          productionSaved, initialTest, productionsTest)
    print(grammarTest.toString())
    Gramaticas.append(grammarTest)
    tree(Gramaticas[0])
    #PRINT THE COVER BANNER
    print("")
    print("*********************************")
    print("Formal Programming Languages")
    print("")
    print("Section B+")
    print("")
    print("Student ID => 201800624")
    print("")
    print("**********************************")
    print()
    while start:
        print(">> ", end="")
        m = str(msvcrt.getch(), 'utf-8')
        if m == "\r":
            os.system("cls")  # msvcrt is Windows-only, so clear with "cls"
            menu()
Example #4
File: test.py Project: gpuweb/gpuweb
 def test_Less_Lookahead_ClosedTF(self):
     i0c = self.is_C_0(closed=True, la=Grammar.LookaheadSet({self.c}))
     i0d = self.is_C_0(closed=False, la=Grammar.LookaheadSet({self.d}))
     # We only compare on content, never by the index. So closure
     # doesn't matter here.
     self.assertLess(i0c, i0d)
     self.assertGreater(i0d, i0c)
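The comment above ("we only compare on content, never by the index") is the invariant behind Examples #2, #4, and #5. A minimal sketch of what content-based comparison can look like; the ItemSetSketch class is illustrative, not the gpuweb Grammar API.

# Item sets compare by their (item, lookahead) content; any bookkeeping
# index attached to the set is deliberately ignored.
class ItemSetSketch:
    def __init__(self, items, index=None):
        # items: mapping item -> set of lookahead terminals
        self._content = frozenset(
            (item, frozenset(la)) for item, la in items.items())
        self.index = index  # bookkeeping only; ignored by comparisons

    def __eq__(self, other):
        return self._content == other._content

    def __hash__(self):
        return hash(self._content)

a = ItemSetSketch({'C -> . c C': {'c'}}, index=0)
b = ItemSetSketch({'C -> . c C': {'d'}}, index=0)
assert a == a and not (a == b)  # equality tracks lookahead content only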
Example #5
File: test.py Project: gpuweb/gpuweb
 def test_Equal_Lookahead_ClosedTF(self):
     i0c = self.is_C_0(closed=True, la=Grammar.LookaheadSet({self.c}))
     i0d = self.is_C_0(closed=False, la=Grammar.LookaheadSet({self.d}))
     self.assertEqual(i0c, i0c)
     self.assertEqual(i0d, i0d)
     self.assertFalse(i0c == i0d)
     self.assertFalse(i0d == i0c)
Example #6
File: test.py Project: gpuweb/gpuweb
 def test_L_end_alone(self):
     i0 = self.iL()
     i0_ = Grammar.ItemSet(self.g, {i0: self.l_end}).close(self.g)
     self.assertFalse(i0_.is_accepting())
     i1 = self.iL(1)
     i1_ = Grammar.ItemSet(self.g, {i1: self.l_end})
     self.assertTrue(i1_.is_accepting())
Example #7
File: test.py Project: gpuweb/gpuweb
 def test_C_end_and(self):
     i0 = self.iC()
     i0_ = Grammar.ItemSet(self.g, {i0: self.l_end_and}).close(self.g)
     self.assertFalse(i0_.is_accepting())
     i1 = self.iC(1)
     i1_ = Grammar.ItemSet(self.g, {i1: self.l_end_and}).close(self.g)
     self.assertFalse(i1_.is_accepting())
Example #8
    def listen(self):
        (clientsocket, address) = self.serversocket.accept()
        try:
            while True:
                #(clientsocket, address) = self.serversocket.accept()
                print("Connection found")
                data = clientsocket.recv(1024).decode("utf-8")
                print(data)
                processed_action = data.split(",")
                function = processed_action[0]
                myinput = processed_action[1]
                print("My input: " + myinput)

                if function == "Stock":
                    stock_info = stock.findStock(myinput)
                    clientsocket.send((stock_info + "\n").encode("utf-8"))
                    print("Sent stock information " + stock_info)
                elif function == "Email":
                    #Read text file, write input from Unity into text file
                    #message = processed_action[2]
                    email.emailBody(myinput)
                    email.main()
                    confirmation = "Email has been sent"
                    #clientsocket.send(confirmation.encode("utf-8"))
                elif function == "Call":
                    #Read in message if user calls or sends message
                    #message = processed_action[2]
                    #cm.outgoingCall(myinput,message)
                    cm.outgoingCall(myinput)
                    confirmation = "Called Phone"
                    #clientsocket.send(confirmation.encode("utf-8"))
                elif function == "SMS":
                    message = processed_action[2]
                    cm.sendMessage(myinput, message)
                    confirmation = "SMS message has been sent"
                    #clientsocket.send(confirmation.encode("utf-8"))
                elif function == "Definition":
                    grammar.Definition(myinput)
                    confirmation = "Found definition"
                    #clientsocket.send(confirmation.encode("utf-8"))
                elif function == "Synonym":
                    grammar.Synonym(myinput)
                    confirmation = "Found synonym"
                    #clientsocket.send(confirmation.encode("utf-8"))
                elif function == "Pizza":
                    myOrder = pizza.parseHololensOrder(myinput)
                    pizza.placeOrder(myOrder)
                    confirmation = "Ordered Pizza (with TBD)"
                    #clientsocket.send(confirmation.encode("utf-8"))
                '''elif function == "Light":
                    lights.turnOn()
                    confirmation = "Adjust Hue Light"
                    #clientsocket.send(confirmation.encode("utf-8"))
                elif data == "ping":
                    print ("Unity Sent: " + str(data))
                    #clientsocket.send("pong")
                print("closed socket")'''
        finally:
            # Close once, when the loop exits; closing inside the loop (as
            # the original per-iteration finally did) killed the connection
            # after the first request.
            clientsocket.close()
Example #9
def test1():
    g = Grammar({
        'S': {'AB'},  # P
        'A': {'BB', 'a'},
        'B': {'AB', 'b'}
    })
    gnf = g.cnf_to_gnf()
    print(gnf)
Example #10
def test3():
    g = Grammar({
        'S': {'CA', 'BB'},  # P
        'C': {'b'},
        'A': {'a'},
        'B': {'SB', 'b'}
    })
    gnf = g.cnf_to_gnf()
    print(gnf)
Example #11
def test2():
    g = Grammar({
        'S': {'AB', 'BC'},  # P
        'A': {'BA', 'a'},
        'B': {'CC', 'b'},
        'C': {'AB', 'a'}
    })
    gnf = g.cnf_to_gnf()
    print(gnf)
Example #12
def test1():
    g = Grammar (
        { 'S', 'A', 'B' }, # V
        { 'a', 'b' }, # T
        'S', # S
        { 'S': { 'AB' }, # P
        'A': { 'BB', 'a' },
        'B': { 'AB', 'b' } } 
    )
    #w = 'aabbb'
    w = 'aab'
    print(g.validate(w))
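validate() here decides membership of a word in a CNF grammar, which is classically done with the CYK algorithm. A self-contained sketch under that assumption; the cyk name and dict encoding are illustrative, and the actual validate() may differ in detail.

# CYK membership test for a grammar in Chomsky normal form.
# productions: nonterminal -> set of RHS strings, each RHS being one
# terminal ('a') or two nonterminals ('AB').
def cyk(word, start, productions):
    n = len(word)
    table = [[set() for _ in range(n)] for _ in range(n)]
    for i, ch in enumerate(word):
        table[0][i] = {A for A, rhss in productions.items() if ch in rhss}
    for span in range(2, n + 1):          # substring length
        for i in range(n - span + 1):     # substring start
            for k in range(1, span):      # split point
                for A, rhss in productions.items():
                    for rhs in rhss:
                        if (len(rhs) == 2
                                and rhs[0] in table[k - 1][i]
                                and rhs[1] in table[span - k - 1][i + k]):
                            table[span - 1][i].add(A)
    return start in table[n - 1][0]

P = {'S': {'AB'}, 'A': {'BB', 'a'}, 'B': {'AB', 'b'}}
print(cyk('aab', 'S', P))   # True: S => AB => aB => a(AB) => aab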
Example #13
def test2():
    g = Grammar(
        {'A', 'B', 'C', 'S'},  # N
        {'a', 'b', 'c'},  # T
        {
            'S': {'ABAC'},  # P
            'A': {'aA', chr(949)},  # chr(949) is ε (the empty string)
            'B': {'bB', chr(949)},
            'C': {'c'}
        })
    print(g)
    g.remove_null_productions()
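remove_null_productions() has to do two things: find every nullable nonterminal, then re-emit each production with every combination of its nullable symbols deleted. A minimal sketch of that standard construction follows; remove_nulls is an illustrative name, not this Grammar's API.

from itertools import combinations

EPS = chr(949)  # ε, as in the grammar above

def remove_nulls(productions):
    # 1. Fixed point: A is nullable if some RHS is ε or consists
    #    entirely of nullable symbols.
    nullable, changed = set(), True
    while changed:
        changed = False
        for A, rhss in productions.items():
            if A not in nullable and any(
                    r == EPS or all(s in nullable for s in r) for r in rhss):
                nullable.add(A)
                changed = True
    # 2. For each RHS, emit every variant that drops some subset of its
    #    nullable positions; discard the bare ε productions.
    new = {}
    for A, rhss in productions.items():
        out = set()
        for r in rhss:
            if r == EPS:
                continue
            optional = [i for i, s in enumerate(r) if s in nullable]
            for k in range(len(optional) + 1):
                for drop in combinations(optional, k):
                    variant = ''.join(s for i, s in enumerate(r) if i not in drop)
                    if variant:
                        out.add(variant)
        new[A] = out
    return new

P = {'S': {'ABAC'}, 'A': {'aA', EPS}, 'B': {'bB', EPS}, 'C': {'c'}}
print(remove_nulls(P)['S'])  # {'ABAC', 'BAC', 'AAC', 'ABC', 'AC', 'BC', 'C'}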
Example #14
def tocnftest():
    g = Grammar (
        { 'S', 'A', 'B' }, # V
        { 'a', 'b', 'c' }, # T
        'S', # S
        { # P
            'S': { 'ABa' }, 
            'A': { 'aab' },
            'B': { 'Ac' }
        } 
    )
    g.fcg_to_cnf()
Example #15
File: test.py Project: gpuweb/gpuweb
 def setUp(self):
     self.g = Grammar.Grammar.Load(DRAGON_BOOK_EXAMPLE_4_42,
                                   'translation_unit')
     self.L = self.g.rules[Grammar.LANGUAGE]
     self.C = self.g.rules["C"]
     self.c = self.g.rules["c"]
     self.d = self.g.rules["d"]
     self.l_empty = Grammar.LookaheadSet({})
     self.l_end = Grammar.LookaheadSet({self.g.MakeEndOfText()})
     self.l_end_and = Grammar.LookaheadSet(
         {self.g.MakeFixed('end'),
          self.g.MakeEndOfText()})
Example #16
def test2():
    g = Grammar (
        { 'S', 'A', 'B', 'C' }, # V
        { 'a', 'b' }, # T
        'S', # S
        { 'S': { 'AB', 'BC' }, # P
        'A': { 'BA', 'a' },
        'B': { 'CC', 'b' }, 
        'C': { 'AB', 'a' } } 
    )
    w = 'baaba'
    print(g.validate(w))
    g.fcgtocnf()
Example #17
def convertToTuples(grammar):
    # Re-index the grammar: binary rules keyed by their (left, right)
    # constituent pair, unary rules keyed by their single constituent.
    new_grammar = Grammar()
    for bc_key in grammar.rules:
        for rule in grammar.rules[bc_key]:
            if len(rule.constituents) == 2:
                tpl = (rule.constituents[0], rule.constituents[1])
                if tpl in new_grammar.rules:
                    new_grammar.rules[tpl].append(rule)
                else:
                    new_grammar.rules[tpl] = [rule]
            else:
                c = rule.constituents[0]
                if c in new_grammar.rules:
                    new_grammar.rules[c].append(rule)
                else:
                    new_grammar.rules[c] = [rule]

    return new_grammar
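The same indexing step can be written without the repeated in/else branches using collections.defaultdict. A sketch under the same assumed rule shape (rule.constituents of length 1 or 2); it returns the plain index dict rather than a Grammar, which is a simplification.

from collections import defaultdict

def convert_to_tuples(grammar):
    # Binary rules keyed by (left, right); unary rules keyed by the
    # single constituent.
    index = defaultdict(list)
    for rules in grammar.rules.values():
        for rule in rules:
            cs = rule.constituents
            key = (cs[0], cs[1]) if len(cs) == 2 else cs[0]
            index[key].append(rule)
    return index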
Example #18
def example_lexicalTree():
    lexParser = LexiconParser()
    entries = lexParser.parse_file("lexicon.txt")
    lexicon = Lexicon(list(set(entries)))
    grammar = Grammar()
    r1b = grammar.rules['R-1b']
    r1a = grammar.rules['R-1a']

    mitka = lexicon.getEntry("Mitka")
    walks = lexicon.getEntry("walks")
    ltree = LexicalTree(rule=r1b, a=mitka, b=walks)
    print(ltree.evaluate())
    print(ltree)

    porky = lexicon.getEntry("Porky")
    zinkly = lexicon.getEntry("Zinkly")
    likes = lexicon.getEntry("likes")
    lptree = LexicalTree(rule=r1a, a=likes, b=porky)
    print(lptree.evaluate())
    zlptree = LexicalTree(rule=r1b, a=zinkly, b=lptree)
    print(zlptree.evaluate())   # This evaluates to undefined
    print("The above should be undefined")

    # likes == m=(m=1 p=1) p=(m=0 p=1) z=(m=0)
    # let's add something to make zlptree evaluate to a value
    print(likes.semantics)
    likes.semantics.update({"p": {"z": "1"}})
    print(likes.semantics)
    print("This should now evaluate to 1:")
    print(zlptree.evaluate())   # Now this evaluates to 1
Example #19
File: Parser.py Project: iistrate/Parser
 def __init__(self, file):
     self.__m_file = file
     self.__m_Grammar = Grammar()
     self.__m_tokens = []
     self.__m_cursor = 0
     #cursor starts at 0, line at 1
     self.__m_line = self.__m_cursor + 1
Example #20
def parse_pn_dict(root, file):

    for entry in root:
        katakana = ""
        translation = ""

        r_ele = entry.find("r_ele")
        if r_ele is not None:
            reb = r_ele.find("reb")
            katakana = reb.text

        trans = entry.find("trans")
        if trans is not None:
            trans_det = trans.find("trans_det")
            if trans_det is not None:
                translation = trans_det.text

        translation = re.sub(r" ?\(.*\)", "", translation)

        if not Grammar.is_katakana(katakana):
            continue
        if translation == "":
            continue
        if len(translation) > 12:
            continue
        if translation.find(" ") != -1:
            continue

        file.write(katakana + "\t" + translation + "\n")
Example #21
    def getPlayer(self):
        ''' Choose a player from those available '''

        # Read the text from the current widget
        text = self.widget.read()

        # Find all the players
        self.players = {}
        for _str, _dict in Grammar.playerData(text):
            # Store dict on the syntax involving a player
            self.players[_str] = _dict

        # Choose whether to create a new one (right now - do not)
        if len(self.players) == 0:
            return None  # dict of a new blank player
        else:
            # list() is needed: random.choice cannot index a dict view
            return choice(list(self.players.keys()))
Example #22
File: test.py Project: gpuweb/gpuweb
 def setUp(self):
     self.g = Grammar.Grammar.Load(DRAGON_BOOK_EXAMPLE_4_42,
                                   'translation_unit')
     self.C = self.g.rules["C"]
     self.c = self.g.rules["c"]
     self.d = self.g.rules["d"]
     self.el = Grammar.LookaheadSet({})
Example #23
def add_frequency(dictionary):
    freq_file_path = os.path.join(
        Constants.PROJECT_DIR, "..", "data", "LeedsWordFrequency",
        "44492-japanese-words-latin-lines-removed.txt")
    # Use a context manager so the file is closed when we are done
    with open(freq_file_path, "r", encoding="utf-8") as frequencies:
        line = frequencies.readline()
        i = 0
        while line:
            i += 1
            line = line[:-1]
            is_hiragana = Grammar.is_hiragana(line)

            entries = dictionary.get(line, [])
            if is_hiragana:
                entries = [
                    e for e in entries if Grammar.Grammar.USUALLY_KANA in e.misc
                ]
            else:
                entries = [
                    e for e in entries
                    if Grammar.Grammar.USUALLY_KANA not in e.misc
                ]

            for entry in entries:
                entry.priority = i

            line = frequencies.readline()
Example #24
    def defineInstructions(self, original, syntax):

        new = Grammar.createPlayer(**syntax)

        self.instructions.put((original, new))

        return
Example #25
    def __init__(self):

        rospy.init_node("SpeechRecognition")
        rospy.on_shutdown(self.shutdown)

        rospy.Service("DragonSpeech/set_grammar", SetGrammar, self.set_gam_srv)
        self.pub_recogres = rospy.Publisher('DragonSpeech/sentence',
                                            AudioSentence,
                                            queue_size=10)
        self.pub_lures = rospy.Publisher("DragonSpeech/luresult",
                                         AudioLUResult,
                                         queue_size=10)

        self.gram = Grammar.Grammar()
        self.gram.load("sample.txt")
        self.valid_gram_id = []

        # Build a regex alternation matching a capital letter followed by
        # its lowercase form: "Aa|Bb|...|Zz".
        pattern = "|".join([
            "%c%c" % (A, a)
            for A, a in zip(range(ord("A"),
                                  ord("Z") + 1), range(ord("a"),
                                                       ord("z") + 1))
        ])
        print(pattern)
        self.large_letter_finder = re.compile(pattern)

        SRWindow.start_speech_recog(self.recog_callback, self.set_gram_file)
Example #26
def ask_productions():
    """ Reads a grammar from the keyboard. """
    print("\n Program that transforms a grammar from CNF to GNF.\n")
    nonterminals = set(
        input("\nEnter the non-terminal symbols, separated by commas: ").
        replace(" ", "").split(','))
    start = input("Enter the start symbol: ")
    productions = dict()
    print("Enter the productions separated by | : ")
    for value in nonterminals:
        ans = input("{} --> ".format(value)).replace(" ", "").split('|')
        productions[value] = set(ans)
    g = Grammar(productions, start)
    print("The equivalent grammar in GNF is: ")
    gnf = g.cnf_to_gnf()
    print(gnf)
    print("\nGoodbye. o-o// ")
Example #27
    def parseFile(self, name, doc):
        """
			Parse an Asterisk configuration file
			@type  name : string
			@param name : Configuration file to use
			@rtype: XML Node
			@return: Corresponding XML node for the file
		"""

        f = open(ASTERISK_HOME + name, 'r')

        output = Grammar.parse('newgoal', f.read())
        f.close()
        node = doc.createElementNS(self.namespace, 'file')
        node.setAttributeNS(self.namespace, 'name', name)

        tmp = node

        for line in output:
            if 'section' in line:
                section = doc.createElementNS(self.namespace, 'section')
                section.setAttributeNS(self.namespace, 'name',
                                       line['section'])
                node.appendChild(section)
                tmp = section

            elif 'attribute' in line:
                attribute = doc.createElementNS(self.namespace,
                                                'attribute')
                attribute.setAttributeNS(self.namespace, 'name',
                                         line['attribute'])

                value = doc.createTextNode(str(line['value']))

                attribute.appendChild(value)

                tmp.appendChild(attribute)

            elif 'comment' in line:
                pass

            elif 'include' in line:
                includeNode = doc.createElementNS(self.namespace,
                                                  'include')
                include = doc.createTextNode(line['include'])
                includeNode.appendChild(include)
                tmp.appendChild(includeNode)

            elif '' in line:
                pass

        return node
Example #28
File: main.py Project: dluman/divinator
def main(args):
    args = parse(args)
    string = args[0]
    grammar = Grammar.Rules(string).apply()
    system = System.Gram()
    system.evaluate(grammar[string])
    if args[1] == 'polar':
        Figure.Polars(system)
    else:
        Figure.Trigrams(system)
Example #29
def main():
    lexParser = LexiconParser()
    entries = lexParser.parse_file("lexicon.txt")
    lexicon = Lexicon(list(set(entries)))
    print(lexicon)
    grammar = Grammar()
    interactor = GrammarInteractor(grammar)
    interactor.populate_lexicon(lexicon)
    print("After populating:")
    print(lexicon)
Example #31
def delunittest():
    g = Grammar (
        { 'S', 'A', 'B' }, # V
        { 'a', 'b', 'c' }, # T
        'S', # S
        { # P
            'S': { 'Aa', 'B' }, 
            'A': { 'a', 'bc', 'B' },
            'B': { 'A', 'bb' }
        } 
    )
Example #32
def clickVerify():

    msg = "Error"

    # "text" avoids shadowing the built-in input()
    text = simpledialog.askstring("Input", "Please enter the string", parent=root)

    try:
        vars = createVariables()
        grammar = Grammar(vars)
        result = grammar.stringVerifier(text)

        if result:
            msg = "The string is generated by the grammar!"
        else:
            msg = "The string is NOT generated by the grammar!"

    except IndexError:
        msg = "Please enter a valid string"

    except Exception as e:
        msg = str(e)
        
    messagebox.showinfo("Message", msg)
Example #33
def ask_productions():
    """ Reads a grammar from the keyboard. """
    print("\n Program that decides whether a word belongs to a CFG.\n")
    nonterminals = set(input("\nEnter the non-terminal symbols, separated by commas: ").
        replace(" ", "").split(','))
    terminals = set(input("Enter the terminal symbols, separated by commas: ").
        replace(" ", "").split(','))
    start = input("Enter the start symbol: ")
    productions = dict()
    print("Enter the productions separated by | : ")
    for value in nonterminals:
        productions[value] = set(input("{} --> ".format(value)).replace(" ", "").split('|'))
    g = Grammar(nonterminals, terminals, start, productions)
    want_to_continue = 'y'
    while want_to_continue == 'y':
        print()
        word = input("Enter a word: ")
        if g.validate(word):
            print("The word %s belongs to L(G) :D " % word)
        else:
            print("The word %s does not belong to L(G) D: " % word)
        want_to_continue = input("Continue? (y/n): ")
    print("\nGoodbye. o-o// ")
Example #34
import numpy as np
import string

from Rule import *
from Grammar import *
from k_compression.k_sequitur import k_seq_compress
from k_seq_grammar_analysis import *

grammar = {'-2' : Rule('-2', ['-1', '3']),
           '-1' : Rule('-1', ['4', '5']),
           '-5' : Rule('-5', ['1', '3']),
           '-3' : Rule('-3', ['-1', '-5']),
           '-6' : gen_framing_rule('-3', '6', '7', 3, 2),
           '-4' : gen_power_rule('-4', '8', '9', 3)}

my_grammar = Grammar(grammar)
for rule in my_grammar.rule_dict.values():
    print(rule.barcode)

non_terminal_weights = np.random.power(5, len(my_grammar.non_terminals))
terminal_weights = np.random.power(5, len(my_grammar.terminals))

all_weights = list(non_terminal_weights)
all_weights.extend(list(terminal_weights))

freqs, reduced_form, expanded_form = my_grammar.rand_seq(1000, all_weights)

print(freqs)
print(reduced_form)
print(''.join(expanded_form))
Example #35
from Grammar import *

Gr = Grammar('<thesis>')
Gr.read('thesis.grm')
print(Gr.generate())


Example #36
File: Parser.py Project: iistrate/Parser
class Parser(object):
    def __init__(self, file):
        self.__m_file = file
        self.__m_Grammar = Grammar()
        self.__m_tokens = []
        self.__m_cursor = 0
        #cursor starts at 0, line at 1
        self.__m_line = self.__m_cursor + 1

    def removeComments(self, string):
        #tested on http://pythex.org/
        comment = re.compile(r'//[^\n]*|/\*|\*[^\n]*', re.MULTILINE | re.DOTALL).sub("", string)
        return comment

    def tokenize(self):
        #my reg exp
        symbols = '[' + re.escape(''.join(self.__m_Grammar.getLex()['symbol'])) + ']'
        keywords = '|'.join(self.__m_Grammar.getLex()['keyword'])
        statements = '|'.join(self.__m_Grammar.getLex()['statement'])
        program = '|'.join(self.__m_Grammar.getLex()['program'])
        nkeywords = r'[\w\-]+'
        strings = r'"[^"]*"'
        numbers = r'\d+'

        #get them all together
        match = re.compile(symbols + "|" + keywords + "|" + strings + "|" + nkeywords
                           + "|" + numbers + "|" + program + "|" + statements)

        for line in self.__m_file:
            #remove out comments
            line = self.removeComments(line)
            #remove newlines
            line = line.strip()
            #remove empty lines
            if (line):
                self.__m_tokens.append(match.findall(line))

    
    def __str__(self):
        rep = ""
        count = 1
        for token in self.__m_tokens:
            rep += "{:3d}{} \n".format(count, token)
            count += 1
        return rep

    #check if we have more tokens in the raw token list
    @property
    def hasMoreTokens(self):
        if self.__m_cursor < len(self.__m_tokens):
            return True
        return False

    def run(self):
        #self.testGrammar() #Tests for grammar
        self.tokenize()
        print(self.__str__())
        valid = True
        while self.hasMoreTokens:
            currentLine = self.__m_tokens[self.__m_cursor]
            try:
                #check for BEGIN; then END
                self.isProgram()
                self.checkUnknown(currentLine)
                #check if valid read
                if (currentLine[0].lower() == "read"):
                    self.isValidFunction(currentLine)
                #check if it starts with an identifier
                elif (self.__m_Grammar.isIdentifier(currentLine[0])):
                    #check if line is a statement
                    self.isValidStatement(currentLine)
                #check if valid write
                elif (currentLine[0].lower() == "write"):
                    self.isValidFunction(currentLine)
                #check if valid var
                elif self.__m_Grammar.isIntegerConstant(currentLine[0][0]):
                    raise Exception("Invalid var name at line {} got {}".format(self.__m_line, currentLine[0]))                        

            except Exception as customErr:
                print(customErr)
                valid = False
                break
                
            self.__m_cursor += 1
            self.updateLine()
         
        if (valid): print("File is valid, congrats you can write good syntax! Yay?")
    
    def checkUnknown(self, line):
        for token in line:
            if token in self.__m_Grammar.getLex()['unknown']:
                raise Exception("Unexpected token: {} on line {}, token not in language!".format(token, self.__m_line))

    def updateLine(self):
        self.__m_line = self.__m_cursor + 1

    def isValidStatement(self, line):
        self.isTerminated(line)
        #we know if started with an identifier
        count = 0
        for token in line:
            #is the next token :=
            if count == 1:
                if token != ":=":
                    raise Exception(self.errorExpectedToken(self.__m_line, ":=", token))
            #if + or -
            if token in self.__m_Grammar.getLex()['op']:
                #check before and after for identifiers or integer constants
                if (not self.__m_Grammar.isIdentifier(line[count-1])) and (not self.__m_Grammar.isIntegerConstant(line[count-1])):
                    raise Exception(self.errorExpectedToken(self.__m_line, "identifier or int on the left", line[count-1]))
                elif (not self.__m_Grammar.isIdentifier(line[count+1])) and (not self.__m_Grammar.isIntegerConstant(line[count+1])):
                    raise Exception(self.errorExpectedToken(self.__m_line, "identifier or int on the right", line[count+1]))
            count += 1

    def isValidFunction(self, line):
        #check if it is a valid statement
        self.isTerminated(line)
        count = 0
        for token in line:
            if (count == 1 and token != "("):
                raise Exception(self.errorExpectedToken(self.__m_line, "(", token))
            if (count == len(line)-2 and token != ")"):
                raise Exception(self.errorExpectedToken(self.__m_line, ")", token))
            if token == ',':
                #check before and after for identifiers or integer constants
                if (not self.__m_Grammar.isIdentifier(line[count-1])) and (not self.__m_Grammar.isIntegerConstant(line[count-1])):
                    raise Exception(self.errorExpectedToken(self.__m_line, "identifier or int on the left", line[count-1]))
                elif (not self.__m_Grammar.isIdentifier(line[count+1])) and (not self.__m_Grammar.isIntegerConstant(line[count+1])):
                    raise Exception(self.errorExpectedToken(self.__m_line, "identifier or int on the right", line[count+1]))
            if token == ';':
                if count != len(line) -1:
                    raise Exception("Invalid use of terminator ';' at line {} expected ','".format(self.__m_line))                     
            count += 1
    
    def isTerminated(self, line):
        if line[-1] != ";":
            raise Exception(self.errorExpectedToken(self.__m_line, "; as a terminator", line[-1]))

    #count left P and right P; at the end raise exception if !=
    def checkParentheses(self, line):
        leftP = 0
        rightP = 0
        for token in line:
            if token == '(':
                leftP += 1
            elif token == ')':
                rightP += 1
        if leftP == rightP:
            return
        #if here we have an error
        missing = "right"
        if leftP < rightP:
            missing = "left"

        raise Exception("Missing a {} parentheses on line {}".format(missing, self.__m_line))

    #check if program
    def isProgram(self):
        if self.__m_tokens[-1][0].lower() == "end" and self.__m_tokens[0][0].lower() == "begin":
            return
        elif self.__m_tokens[0][0].lower() != "BEGIN":
            raise Exception(self.errorExpectedToken(1, "BEGIN", self.__m_tokens[0][0]))
        elif self.__m_tokens[-1][0].lower() != "end":
            raise Exception(self.errorExpectedToken(len(self.__m_tokens)+1, "END", self.__m_tokens[-1][0]))
    
    #custom exception message
    def errorExpectedToken(self, lineNr, expected, got):
        return "Error at line #{}: expected {} not {}".format(lineNr, expected, got)

    #tests
    def testGrammar(self):
        print(self.__m_Grammar.isIdentifier("variable"))
        print(self.__m_Grammar.isIdentifier("1variable"))
        print(self.__m_Grammar.isIntegerConstant("500"))
        print(self.__m_Grammar.isIntegerConstant("b500"))
        print(self.__m_Grammar.isProgramKw("BEGIN"))
        print(self.__m_Grammar.isProgramKw("BLA"))
        print(self.__m_Grammar.isStringConstant("\"String bla bla\""))
        print(self.__m_Grammar.isStringConstant("String bla bla"))
        print(self.__m_Grammar.isSymbol("("))
        print(self.__m_Grammar.isSymbol("$"))
        print(self.__m_Grammar.isOp("+"))
        print(self.__m_Grammar.isOp("/"))
        print(self.__m_Grammar.isStatement(":="))
        print(self.__m_Grammar.isStatement("="))
Example #37
File: g1.py Project: lauraminor/haiku
from Grammar import *

g1 = Grammar("<haiku>")
g1.read("grammar1.grm")
print()
print(g1.generate())
print()
Example #38
def translate(sentence):
    if sentence is None and server is False:
        sentence = input("Enter your sentence and watch crazy things happen:")
    tokens = nltk.word_tokenize(sentence)
    tagged = nltk.pos_tag(tokens)
    Grammar.wordorder(tagged, sentence)
Example #39
from Grammar import *
g2 = Grammar('<epic_firstline>')
g2.read('grammar2.grm')
print()
print(g2.generate())
print()
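
Examples #35, #37, and #39 all follow the same pattern: read a .grm file, then call generate() on a start symbol like '<haiku>'. The .grm format is not shown here, but generate() is presumably a recursive random expansion from the start symbol; below is a self-contained sketch with an inline grammar. The rules dict and generate function are illustrative, not this Grammar class's internals.

import random

# <angle-bracketed> symbols are nonterminals; each maps to a list of
# alternative expansions, and anything without a rule is a terminal.
rules = {
    '<haiku>': [['<line>', '/', '<line>', '/', '<line>']],
    '<line>': [['an', 'old', 'silent', 'pond'],
               ['a', 'frog', 'jumps', 'in'],
               ['the', 'sound', 'of', 'water']],
}

def generate(symbol):
    if symbol not in rules:
        return symbol                       # terminal: emit as-is
    expansion = random.choice(rules[symbol])
    return ' '.join(generate(s) for s in expansion)

print(generate('<haiku>'))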