Example 1
import unittest
from lexer import Lexer
from parserr import Parser


class Test(unittest.TestCase):
    def test2(self):
        # Parse a single atomic proposition and check each node kind in order.
        lexerlist = Lexer('Q').tokenize(1)
        parserlist = Parser().parse(lexerlist)
        self.assertEqual(parserlist[0].kind, "propositions")
        self.assertEqual(parserlist[1].kind, "proposition")
        self.assertEqual(parserlist[2].kind, "atomic")
        self.assertEqual(parserlist[3].kind, "ID")
        self.assertEqual(parserlist[4].kind, "more-proposition")
        self.assertEqual(parserlist[5].kind, "epsilon")
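This test class can be run with the standard-library runner; a minimal sketch, assuming the class above is saved as test_parser.py (the filename is an assumption, not from the source):

# Run with: python -m unittest test_parser
import unittest

if __name__ == "__main__":
    unittest.main()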
Example 2
import numpy as np
import tensorflow as tf
import random
import sys, os
import json
import argparse
from parserr import Parser
from datamanager import DataManager
from actor import ActorNetwork
from LSTM_critic import LSTM_CriticNetwork
tf.logging.set_verbosity(tf.logging.ERROR)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# parse command-line arguments
argv = sys.argv[1:]
parser = Parser().getParser()
args, _ = parser.parse_known_args(argv)
random.seed(args.seed)

# load the dataset splits and pretrained word vectors
dataManager = DataManager(args.dataset)
train_data, dev_data, test_data = dataManager.getdata(args.grained, args.maxlenth)
word_vector = dataManager.get_wordvector(args.word_vector)

if args.fasttest == 1:  # fast-test mode: run on a small subset of the data
    train_data = train_data[:100]
    dev_data = dev_data[:20]
    test_data = test_data[:20]
print("train_data ", len(train_data))
print("dev_data", len(dev_data))
print("test_data", len(test_data))
Example 3
import unittest
from lexer import Lexer
from parserr import Parser

with open(input("Enter Filename: "), 'r') as file:
    data = file.readlines()

currentline = 1

for lines in data:
    print("Proposition: " + lines.rstrip())
    tokenlist = Lexer(lines.rstrip()).tokenize(currentline)
    print("Lexer: ", tokenlist)
    grammarlist = Parser().parse(tokenlist)
    print("Parser:", grammarlist)
    print()
    currentline += 1

'''
class Test(unittest.TestCase):
    def test1(self):
        tokenlist = Lexer('Q').tokenize(1)
        self.assertEqual(tokenlist[0].kind, "ID")
        self.assertEqual(tokenlist[0].loc.col, 1)
        self.assertEqual(tokenlist[0].loc.line, 1)

    def test2(self):
        tokenlist = Lexer('Q').tokenize(1)
        grammarlist = Parser().parse(tokenlist)
        self.assertEqual(grammarlist[0].kind, "propositions")
        self.assertEqual(grammarlist[1].kind, "proposition")
        self.assertEqual(grammarlist[2].kind, "atomic")
        self.assertEqual(grammarlist[3].kind, "ID")
        self.assertEqual(grammarlist[4].kind, "more-proposition")
        self.assertEqual(grammarlist[5].kind, "epsilon")
'''
Example 4
from lexer import Lexer
from parserr import Parser
from smtbuilder import SMTbuilder
import os, sys, unittest

with open(sys.argv[1], "r") as file:
    data = file.readlines()

currentline = 1

for lines in data:
    lexerlist = Lexer(lines.rstrip()).tokenize(currentline)
    parserlist = Parser().parse(lexerlist)

    if "Syntax Error" not in parserlist:  # no grammar error found
        SMTbuilder("output.py").build(lexerlist)
        import output  # module is only executed on the first import
    else:
        print(parserlist)  # print the error

    currentline += 1


class Test(unittest.TestCase):
    def test1(self):
        lexerlist = Lexer('Q').tokenize(1)
        self.assertEqual(lexerlist[0].kind, "ID")
        self.assertEqual(lexerlist[0].loc.col, 1)
        self.assertEqual(lexerlist[0].loc.line, 1)
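One caveat in the loop above: Python caches modules, so import output executes the generated file only on the first iteration, even though output.py is rebuilt each time. A minimal sketch of a reload-based variant, assuming re-execution per line is the intent (the source does not confirm this); it reuses Lexer, Parser, SMTbuilder, and data from the example:

import importlib

output = None
for lines in data:
    lexerlist = Lexer(lines.rstrip()).tokenize(currentline)
    parserlist = Parser().parse(lexerlist)

    if "Syntax Error" not in parserlist:
        SMTbuilder("output.py").build(lexerlist)
        if output is None:
            import output  # first import runs the generated script
        else:
            output = importlib.reload(output)  # re-execute the regenerated script
    else:
        print(parserlist)

    currentline += 1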
Example 5
from grammar import Grammar
from grammar_menu import UI
from parserr import Parser


def read_pif(file_path):
    # Collect the left-hand token from each "token -> code" line of the PIF file.
    lst = []
    with open(file_path, 'r') as file:
        for line in file:
            elem = line.split("->")[0].strip()
            lst.append(elem)
    return lst


if __name__ == "__main__":
    # grammar = Grammar("g_test.in")
    # ui = UI(grammar)
    # ui.run()

    lst = read_pif("p1.in")
    print(lst)

    grammar = Grammar("g1.in")

    parser = Parser(grammar)
    w = ["a", "a", "c", "b", "c"]  # input word to parse against the grammar
    parser.run(w)
Example 6
def __init__(self):
    self.parser = Parser()