Example no. 1
def main():
    if len(sys.argv) < 4:
        print """
        Usage:

        python parse.py in.model > out.conll

        Input can be provided manually via the command prompt or piped directly
        to the script using cat.
        """
    # END if

    if sys.stdin.isatty():
        rawtext = [raw_input("Please type a sentence!")]
    else:
        rawtext = sys.stdin.read()
    # END if

    out_filename = sys.argv[3]
    model_filename = sys.argv[1]

    try:
        tp = TransitionParser.load(model_filename)
        parsed = tp.parse(rawtext)

        with open(out_filename, 'w') as f:
            for p in parsed:
                f.write(p.to_conll(10).encode('utf-8'))
                f.write('\n')
            # END for
        # END with
    except Exception as e:
        # Report the failure instead of a bare string expression that does nothing.
        print "Error:", e
Example no. 2
def main():
	try:
		sentences = sys.stdin.readlines()
		model_file = sys.argv[1]
	except:
		raise ValueError('''Usage: cat <file of sentences> | python parse.py <model_file>
		or, python parse.py <model_file>, type sentences and hit Ctrl+d''')
	
	if not os.path.isfile(model_file):
		raise ValueError("can't find the model file")
	
	# scrub list / remove line breaks
	sentences = [sent.rstrip() for sent in sentences]

	# generate dependency graph object from sentences
	depgraphs = [DependencyGraph.from_sentence(sent) for sent in sentences]

	# load model and parse
	tp = TransitionParser.load(model_file)
	parsed = tp.parse(depgraphs)
	
	# print to stdout. 
	# can cat this to a conll file for viewing with MaltEval
	for p in parsed:
		print(p.to_conll(10).encode('utf-8'))
	
	return
Example no. 3
def evaluate_parse(partIdx):
  if partIdx == 3:
    print 'Evaluating your swedish model ... '
    testdata = dataset.get_swedish_test_corpus().parsed_sents()
    if not os.path.exists('./swedish.model'):
      print 'No model. Please save your model as swedish.model at current directory before submission.'
      sys.exit(0)
    tp = TransitionParser.load('swedish.model')
    parsed = tp.parse(testdata)
    ev = DependencyEvaluator(testdata, parsed)
    uas, las = ev.eval()
    print 'UAS:',uas
    print 'LAS:',las
    swed_score = (min(las, 0.7) / 0.7) ** 2
    return swed_score
  
  if partIdx == 1:
    print 'Evaluating your english model ... '
    testdata = dataset.get_english_test_corpus().parsed_sents()
    if not os.path.exists('./english.model'):
      print 'No model. Please save your model as english.model at current directory before submission.'
      sys.exit(0)
    tp = TransitionParser.load('english.model')
    parsed = tp.parse(testdata)
    ev = DependencyEvaluator(testdata, parsed)
    uas, las = ev.eval()
    print 'UAS:',uas
    print 'LAS:',las
    eng_score = (min(las, 0.7) / 0.7) ** 2
    return eng_score
  
  if partIdx == 2:
    print 'Evaluating your danish model ... '
    testdata = dataset.get_danish_test_corpus().parsed_sents()
    if not os.path.exists('./danish.model'):
      print 'No model. Please save your model as danish.model at current directory before submission.'
      sys.exit(0)
    tp = TransitionParser.load('danish.model')
    parsed = tp.parse(testdata)
    ev = DependencyEvaluator(testdata, parsed)
    uas, las = ev.eval()
    print 'UAS:',uas
    print 'LAS:',las
    dan_score = (min(las, 0.7) / 0.7) ** 2
    return dan_score
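Note on the scoring line above: the grader caps LAS at 0.7, rescales, and squares it, so anything at or above 0.7 earns full credit. A quick check of the arithmetic (the LAS value below is made up purely for illustration):

las = 0.63                              # illustrative value, not a real result
score = (min(las, 0.7) / 0.7) ** 2      # (0.63 / 0.7)**2 = 0.9**2 = 0.81
print round(score, 2)                   # 0.81
print (min(0.82, 0.7) / 0.7) ** 2       # any LAS >= 0.7 caps out at 1.0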
Example no. 4
def evaluate_parse(partIdx):
    if partIdx == 3:
        print 'Evaluating your swedish model ... '
        testdata = dataset.get_swedish_test_corpus().parsed_sents()
        if not os.path.exists('./swedish.model'):
            print 'No model. Please save your model as swedish.model at current directory before submission.'
            sys.exit(0)
        tp = TransitionParser.load('swedish.model')
        parsed = tp.parse(testdata)
        ev = DependencyEvaluator(testdata, parsed)
        uas, las = ev.eval()
        print 'UAS:', uas
        print 'LAS:', las
        swed_score = (min(las, 0.7) / 0.7)**2
        return swed_score

    if partIdx == 1:
        print 'Evaluating your english model ... '
        testdata = dataset.get_english_test_corpus().parsed_sents()
        if not os.path.exists('./english.model'):
            print 'No model. Please save your model as english.model at current directory before submission.'
            sys.exit(0)
        tp = TransitionParser.load('english.model')
        parsed = tp.parse(testdata)
        ev = DependencyEvaluator(testdata, parsed)
        uas, las = ev.eval()
        print 'UAS:', uas
        print 'LAS:', las
        eng_score = (min(las, 0.7) / 0.7)**2
        return eng_score

    if partIdx == 2:
        print 'Evaluating your danish model ... '
        testdata = dataset.get_danish_test_corpus().parsed_sents()
        if not os.path.exists('./danish.model'):
            print 'No model. Please save your model as danish.model at current directory before submission.'
            sys.exit(0)
        tp = TransitionParser.load('danish.model')
        parsed = tp.parse(testdata)
        ev = DependencyEvaluator(testdata, parsed)
        uas, las = ev.eval()
        print 'UAS:', uas
        print 'LAS:', las
        dan_score = (min(las, 0.7) / 0.7)**2
        return dan_score
Example no. 5
def main():
    file_to_parse = sys.stdin
    sentences_list = [s for s in file_to_parse]
    file_to_parse.close()

    lang_model = sys.argv[1]
    tp = TransitionParser.load(lang_model)

    sentences = [DependencyGraph.from_sentence(s) for s in sentences_list]
    parsed = tp.parse(sentences)
    for p in parsed:
        print p.to_conll(10).encode('utf-8')
Example no. 6
def verify_lang_data(model, conll_output):
    try:
        lang = extract_lang_from_model_name(model)
        testdata = get_data_from_lang(lang)
        tp = TransitionParser.load(model)

        parsed = tp.parse(testdata)

        with open(conll_output, 'w') as f:
            for p in parsed:
                f.write(p.to_conll(10).encode('utf-8'))
                f.write('\n')

        ev = DependencyEvaluator(testdata, parsed)
        uas, las = ev.eval()
        print "\n=====Prediction of {}.model===== \nUAS: {} \nLAS: {}".format(lang, uas, las)
        return las
    except ValueError as e:
        print(e)
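The helper extract_lang_from_model_name used above is defined elsewhere in that project. Purely as an illustration of what such a helper might do (this is a guess, not the project's actual code), it could strip the path and the .model suffix:

import os

def extract_lang_from_model_name(model):
    # Hypothetical sketch only: 'models/swedish.model' -> 'swedish'.
    # The real helper in the project above may behave differently.
    base = os.path.basename(model)
    if base.endswith('.model'):
        base = base[:-len('.model')]
    return base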
Example no. 7
def parse(argv):
    if len(argv) != 2:
        sys.exit("python parse.py language.model")
#    data = dataset.get_english_train_corpus().parsed_sents()
#    random.seed(1234)
#    subdata = random.sample(data, 200)
    language_model = argv[1]
    try:
        sentences = sys.stdin.readlines()
        for i, sentence in enumerate(sentences):
            dg = DependencyGraph.from_sentence(sentence)
            tp = TransitionParser.load(language_model)
            parsed = tp.parse([dg])
            print parsed[0].to_conll(10).encode('utf-8')
#           tp = TransitionParser(Transition, FeatureExtractor)
#           tp.train(subdata)
#           tp.save('english.model')
#           testdata = dataset.get_swedish_test_corpus().parsed_sents()
#           tp = TransitionParser.load('english.model')

#           parsed = tp.parse(testdata)
            # open a new file for writing on the first sentence
            if i == 0:
                with open('test.conll', 'w') as f:
                    for p in parsed:
                        f.write(p.to_conll(10).encode('utf-8'))
                        f.write('\n')
            # append for the remaining sentences
            else:
                with open('test.conll', 'a') as f:
                    for p in parsed:
                        f.write(p.to_conll(10).encode('utf-8'))
                        f.write('\n')

#        ev = DependencyEvaluator(testdata, parsed)
#        print "UAS: {} \nLAS: {}".format(*ev.eval())

    except NotImplementedError:
        print """
    subdata = random.sample(data, 200) # use this subdata for bad features and swedish

    # NEED DANISH AND ENGLISH
    data_e = dataset.get_english_train_corpus().parsed_sents()
    random.seed(1234)
    subdata_e = random.sample(data_e, 200)

    data_d = dataset.get_danish_train_corpus().parsed_sents()
    random.seed(1234)
    subdata_d = random.sample(data_d, 200)

    try:
        # BAD FEATURES MODEL (SWEDISH DATA)
        print "Starting Bad Features"
        testdata = dataset.get_swedish_test_corpus().parsed_sents()
        tp = TransitionParser.load('badfeatures.model')

        parsed = tp.parse(testdata)

        with open('test.conll', 'w') as f:
            for p in parsed:
                f.write(p.to_conll(10).encode('utf-8'))
                f.write('\n')

        ev = DependencyEvaluator(testdata, parsed)
        print "Bad Features Results"
        print "UAS: {} \nLAS: {}".format(*ev.eval())
        t1 = time.time()
        print "Time: "+str(t1 - t0) + '\n'

        # SWEDISH FEATURE MODELS
Example no. 9
    F_TRAIN_ENGLISH = True
    F_TRAIN_DANISH  = True
    F_TRAIN_KOREAN  = False

    #traindata = dataset.get_swedish_train_corpus().parsed_sents()


    try:
        if F_TEST_BADMODEL == True:
            print time.ctime(), "START BADMODEL"
            traindata = dataset.get_swedish_train_corpus().parsed_sents()
            labeleddata = dataset.get_swedish_dev_corpus().parsed_sents()
            blinddata = dataset.get_swedish_dev_blind_corpus().parsed_sents()

            modelfile = 'badfeatures.model'
            tp = TransitionParser.load(modelfile)
            parsed = tp.parse(blinddata)

            ev = DependencyEvaluator(labeleddata, parsed)
            print "UAS: {} \nLAS: {}".format(*ev.eval())

            conllfile = 'test.conll'
            with open(conllfile, 'w') as f:
                for p in parsed:
                    f.write(p.to_conll(10).encode('utf-8'))
                    f.write('\n')

            print time.ctime(), "-------DONE----- BADMODEL", modelfile, conllfile

        if F_TRAIN_SWEDISH == True:
            print time.ctime(), "START TRAIN SWEDISH"
Example no. 10
    random.seed(1234)
    subdata = random.sample(data, 200)

    try:
        tp = TransitionParser(Transition, FeatureExtractor)
        tp.train(subdata)
        #tp.save('swedish.model')
        tp.save('korean.model')
        #tp.save('danish.model')

        #testdata = dataset.get_swedish_test_corpus().parsed_sents()
        testdata = dataset.get_korean_test_corpus().parsed_sents()
        #testdata = dataset.get_danish_test_corpus().parsed_sents()

        #tp = TransitionParser.load('swedish.model')
        tp = TransitionParser.load('korean.model')
        #tp = TransitionParser.load('danish.model')

        parsed = tp.parse(testdata)

        with open('test.conll', 'w') as f:
            for p in parsed:
                f.write(p.to_conll(10).encode('utf-8'))
                f.write('\n')

        ev = DependencyEvaluator(testdata, parsed)
        print "UAS: {} \nLAS: {}".format(*ev.eval())

        # parsing arbitrary sentences (english):
        # sentence = DependencyGraph.from_sentence('Hi, this is a test')
Example no. 11
def handle_input(input_file, model_file):
    tp = TransitionParser.load(model_file)
    for line in input_file:
        sentence = DependencyGraph.from_sentence(line)
        parsed = tp.parse([sentence])
        print parsed[0].to_conll(10).encode('utf-8')
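A possible driver for handle_input (the argument handling below is an assumption; the surrounding script is not shown in this excerpt):

import sys

if __name__ == '__main__':
    # e.g.  cat sentences.txt | python parse.py english.model
    if len(sys.argv) != 2:
        sys.exit("usage: python parse.py <model_file>")
    handle_input(sys.stdin, sys.argv[1])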
Example no. 12
from transition import Transition

if __name__ == '__main__':
    data = dataset.get_swedish_train_corpus().parsed_sents()
    random.seed(1234)
    subdata = random.sample(data, 200)

    try:

        tp = TransitionParser(Transition, FeatureExtractor)

        tp.train(subdata)
        tp.save('swedish.model')

        testdata = dataset.get_swedish_test_corpus().parsed_sents()
        tp = TransitionParser.load('swedish.model')

        parsed = tp.parse(testdata)

        with open('test.conll', 'w') as f:
            for p in parsed:
                f.write(p.to_conll(10).encode('utf-8'))
                f.write('\n')

        ev = DependencyEvaluator(testdata, parsed)
        print "UAS: {} \nLAS: {}".format(*ev.eval())

        # parsing arbitrary sentences (swedish):
        # sentence = DependencyGraph.from_sentence('Hi, this is a test')

        # tp = TransitionParser.load('swedish.model')
Example no. 13
from providedcode.evaluate import DependencyEvaluator
from featureextractor import FeatureExtractor
from transition import Transition

if __name__ == '__main__':
    data = dataset.get_swedish_train_corpus().parsed_sents()
    random.seed(1234)
    subdata = random.sample(data, 200)

    try:
        # tp = TransitionParser(Transition, FeatureExtractor)
        # tp.train(subdata)
        # tp.save('swedish.model')

        testdata = dataset.get_swedish_test_corpus().parsed_sents()
        tp = TransitionParser.load('badfeatures.model')

        parsed = tp.parse(testdata)

        with open('test.conll', 'w') as f:
            for p in parsed:
                f.write(p.to_conll(10).encode('utf-8'))
                f.write('\n')

        ev = DependencyEvaluator(testdata, parsed)
        print "UAS: {} \nLAS: {}".format(*ev.eval())

        # parsing arbitrary sentences (english):
        # sentence = DependencyGraph.from_sentence('Hi, this is a test')

        # tp = TransitionParser.load('english.model')
Example no. 14
    F_TRAIN_SWEDISH = True
    F_TRAIN_ENGLISH = True
    F_TRAIN_DANISH = True
    F_TRAIN_KOREAN = False

    #traindata = dataset.get_swedish_train_corpus().parsed_sents()

    try:
        if F_TEST_BADMODEL == True:
            print time.ctime(), "START BADMODEL"
            traindata = dataset.get_swedish_train_corpus().parsed_sents()
            labeleddata = dataset.get_swedish_dev_corpus().parsed_sents()
            blinddata = dataset.get_swedish_dev_blind_corpus().parsed_sents()

            modelfile = 'badfeatures.model'
            tp = TransitionParser.load(modelfile)
            parsed = tp.parse(blinddata)

            ev = DependencyEvaluator(labeleddata, parsed)
            print "UAS: {} \nLAS: {}".format(*ev.eval())

            conllfile = 'test.conll'
            with open(conllfile, 'w') as f:
                for p in parsed:
                    f.write(p.to_conll(10).encode('utf-8'))
                    f.write('\n')

            print time.ctime(), "-------DONE----- BADMODEL", modelfile, conllfile

        if F_TRAIN_SWEDISH == True:
Example no. 15
from providedcode.transitionparser import TransitionParser
from providedcode.dependencygraph import DependencyGraph
import fileinput
import sys
# parsing arbitrary sentences (english):
import nltk
from nltk.tag import map_tag

if __name__ == '__main__':
    if (len(sys.argv) != 2):
        print "need 1 argument for model!"
        exit(1)

    tp = TransitionParser.load(sys.argv[1])
    line = sys.stdin.readline()
    while line:

        sentence = DependencyGraph.from_sentence(line)
        for (index, node) in enumerate(sentence.nodes):
            sentence.nodes[index]['ctag'] = map_tag(
                'en-ptb', 'universal', sentence.nodes[index]['ctag'])

        parsed = tp.parse([sentence])
        print parsed[0].to_conll(10).encode('utf-8')
        line = sys.stdin.readline()
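The loop above rewrites each coarse POS tag from the Penn Treebank tagset ('en-ptb') to the Universal tagset before parsing. The mapping step on its own looks like this (the tags shown are standard nltk mappings):

from nltk.tag import map_tag

print map_tag('en-ptb', 'universal', 'NNP')   # NOUN
print map_tag('en-ptb', 'universal', 'VBZ')   # VERB
print map_tag('en-ptb', 'universal', 'JJ')    # ADJ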
Example no. 16
import sys
from providedcode.transitionparser import TransitionParser
from providedcode.dependencygraph import DependencyGraph

if __name__ == '__main__':
    # the raw sentences read from englishfile
    lines = sys.stdin

    # if the model argument is missing, exit the program.
    # (note: sys.stdin itself is always truthy, so it cannot be used to detect empty input)
    if len(sys.argv) != 2:
        exit()
    # put the raw sentences in to dependency graphs and form a list of these graphs.
    sentences = [DependencyGraph.from_sentence(line) for line in lines]

    model_name = sys.argv[1]

    # load the trained model
    tp = TransitionParser.load(model_name)

    # parse the sentences with the model
    parsed = tp.parse(sentences)

    # write the parsed sentences into the output file conll supported format.
    for parsed_line in parsed:
        print parsed_line.to_conll(10).encode('utf-8')

    #sentence = DependencyGraph.from_sentence('Hi, this is a test')
    #tp = TransitionParser.load('english.model')
    #parsed = tp.parse([sentence])
    #print parsed[0].to_conll(10).encode('utf-8')
Example no. 17
from providedcode.dependencygraph import DependencyGraph
from providedcode import dataset
from providedcode.transitionparser import TransitionParser
from providedcode.evaluate import DependencyEvaluator
from featureextractor import FeatureExtractor
from transition import Transition
import sys

if __name__ == "__main__":
    try:
        # parsing arbitrary sentences (english):
        fromInput = "".join(sys.stdin.readlines())
        # print fromInput
        sentence = DependencyGraph.from_sentence(fromInput)

        tp = TransitionParser.load("english.model")
        parsed = tp.parse([sentence])
        print parsed[0].to_conll(10).encode("utf-8")
    except NotImplementedError:
        print """
        This file is currently broken! We removed the implementation of Transition
        (in transition.py), which tells the transitionparser how to go from one
        Configuration to another Configuration. This is an essential part of the
        arc-eager dependency parsing algorithm, so you should probably fix that :)

        The algorithm is described in great detail here:
            http://aclweb.org/anthology//C/C12/C12-1059.pdf

        We also haven't actually implemented most of the features for the
        support vector machine (in featureextractor.py), so as you might expect the
        evaluator is going to give you somewhat bad results...
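For readers unfamiliar with the algorithm that message refers to: arc-eager parsing steps between configurations consisting of a stack, an input buffer, and a set of arcs, using four transitions (shift, left-arc, right-arc, reduce). The toy sketch below only illustrates that logic, with preconditions omitted; it is not the course's Transition/Configuration interface, and all names in it are made up:

class ToyConfiguration(object):
    """Minimal stand-in for a parser configuration: stack, buffer, arcs."""
    def __init__(self, n_words):
        self.stack = [0]                           # 0 is the artificial ROOT
        self.buffer = list(range(1, n_words + 1))  # token indices still to process
        self.arcs = []                             # (head, relation, dependent) triples

def shift(config):
    # Move the next buffer token onto the stack.
    config.stack.append(config.buffer.pop(0))

def left_arc(config, rel):
    # The stack top becomes a dependent of the front of the buffer and is popped.
    dependent = config.stack.pop()
    config.arcs.append((config.buffer[0], rel, dependent))

def right_arc(config, rel):
    # The front of the buffer becomes a dependent of the stack top and is pushed.
    dependent = config.buffer.pop(0)
    config.arcs.append((config.stack[-1], rel, dependent))
    config.stack.append(dependent)

def reduce_top(config):
    # Pop the stack top once it has already received its head.
    config.stack.pop()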
Example no. 18
File: test.py Project: jpgard/NLP
from providedcode.evaluate import DependencyEvaluator
from featureextractor import FeatureExtractor
from transition import Transition

if __name__ == '__main__':
    data = dataset.get_swedish_train_corpus().parsed_sents()
    random.seed(1234)
    subdata = random.sample(data, 200)

    try:
        # uncommented the following three lines; this trains and saves the model
        tp = TransitionParser(Transition, FeatureExtractor)
        tp.train(subdata)
        tp.save('swedish.model')
        testdata = dataset.get_swedish_test_corpus().parsed_sents()
        tp = TransitionParser.load('swedish.model')

        parsed = tp.parse(testdata)

        with open('test.conll', 'w') as f:
            for p in parsed:
                f.write(p.to_conll(10).encode('utf-8'))
                f.write('\n')

        ev = DependencyEvaluator(testdata, parsed)
        print "UAS: {} \nLAS: {}".format(*ev.eval())

        # parsing arbitrary sentences (swedish):
        # sentence = DependencyGraph.from_sentence('Hi, this is a test')

        # tp = TransitionParser.load('swedish.model')
Example no. 19
File: parse.py Project: chewpin/hw2
import random
import nltk
from providedcode import dataset
from providedcode.transitionparser import TransitionParser
from providedcode.evaluate import DependencyEvaluator
from providedcode.dependencygraph import DependencyGraph
from featureextractor import FeatureExtractor
from transition import Transition

import sys
if __name__ == '__main__':

    try:
        model = sys.argv[1]
        tp = TransitionParser.load(model)

        for line in sys.stdin:
            # temp = line.strip()
            # temp = str(temp)
            # parsing arbitrary sentences (english):
            # print "[" + temp + "]"
            temp = line
            # temp = "Hi, this is a test."

            sentence = DependencyGraph.from_sentence(temp)

            for key, dct in sentence.nodes.items():
                dct['ctag'] = nltk.tag.mapping.map_tag("en-ptb", "universal", dct['ctag'])

            parsed = tp.parse([sentence])
            print parsed[0].to_conll(10).encode('utf-8')
Example no. 20
    # load the danish training corpus and get 200 random sentences
    danish_data = dataset.get_danish_train_corpus().parsed_sents()
    random.seed()
    danish_subdata = random.sample(danish_data, 200)

    try:
        print 'training swedish'

        # swedish
        tp = TransitionParser(Transition, FeatureExtractor)
        tp.train(swedish_subdata)
        tp.save('swedish.model')

        testdata = dataset.get_swedish_test_corpus().parsed_sents()
        tp = TransitionParser.load('swedish.model')

        print 'testing swedish'
        parsed = tp.parse(testdata)

        with open('test.conll', 'w') as f:
            for p in parsed:
                f.write(p.to_conll(10).encode('utf-8'))
                f.write('\n')

        ev = DependencyEvaluator(testdata, parsed)
        print 'Swedish results'
        print "UAS: {} \nLAS: {}".format(*ev.eval())


        # english
Example no. 21
from providedcode.evaluate import DependencyEvaluator
from featureextractor import FeatureExtractor
from transition import Transition

if __name__ == '__main__':
    data = dataset.get_swedish_train_corpus().parsed_sents()
    random.seed(1234)
    subdata = random.sample(data, 200)

    try:
        # tp = TransitionParser(Transition, FeatureExtractor)
        # tp.train(subdata)
        # tp.save('swedish.model')

        testdata = dataset.get_swedish_test_corpus().parsed_sents()
        tp = TransitionParser.load('badfeatures.model')

        parsed = tp.parse(testdata)

        with open('test.conll', 'w') as f:
            for p in parsed:
                f.write(p.to_conll(10).encode('utf-8'))
                f.write('\n')

        ev = DependencyEvaluator(testdata, parsed)
        print "UAS: {} \nLAS: {}".format(*ev.eval())

        # parsing arbitrary sentences (english):
        # sentence = DependencyGraph.from_sentence('Hi, this is a test')

        # tp = TransitionParser.load('english.model')
Example no. 22
    subdata = random.sample(data, 200)

    try:
        tp = TransitionParser(Transition, FeatureExtractor)
        tp.train(subdata)
        #tp.save('swedish.model')
        #tp.save('korean.model')
        tp.save('danish.model')

        #testdata = dataset.get_swedish_test_corpus().parsed_sents()
        #testdata = dataset.get_korean_test_corpus().parsed_sents()
        testdata = dataset.get_danish_test_corpus().parsed_sents()

        #tp = TransitionParser.load('swedish.model')
        #tp = TransitionParser.load('korean.model')
        tp = TransitionParser.load('danish.model')

        parsed = tp.parse(testdata)

        with open('test.conll', 'w') as f:
            for p in parsed:
                f.write(p.to_conll(10).encode('utf-8'))
                f.write('\n')

        ev = DependencyEvaluator(testdata, parsed)
        print "UAS: {} \nLAS: {}".format(*ev.eval())

        # parsing arbitrary sentences (english):
        # sentence = DependencyGraph.from_sentence('Hi, this is a test')

        # tp = TransitionParser.load('english.model')
Example no. 23
import providedcode
from providedcode.transitionparser import TransitionParser
from providedcode.dependencygraph import DependencyGraph
from providedcode.evaluate import DependencyEvaluator
import sys

tp = TransitionParser.load('english.model')

for line in sys.stdin:
    sentence = DependencyGraph.from_sentence(line)
    parsed = tp.parse([sentence])
    print parsed[0].to_conll(10).encode('utf-8')
Example no. 24
__author__ = 'johnfulgoni'

import sys
from providedcode.dependencygraph import DependencyGraph
from providedcode.transitionparser import TransitionParser

# DON'T PRINT ANYTHING! OR ELSE IT MESSES THINGS UP

if __name__ == '__main__':
    argc = len(sys.argv)
    if argc == 2:
        #print sys.argv[1] # just to see

        sentence_list = []
        for sent in sys.stdin:  # get the sentences from the englishfile
            sentence = DependencyGraph.from_sentence(sent)
            sentence_list.append(sentence)

        my_model = sys.argv[1]  # should be 'english.model'
        tp = TransitionParser.load(my_model)
        parsed = tp.parse(sentence_list)

        # following the example in test.py
        # but we're not writing it to a file
        for p in parsed:
            print p.to_conll(10).encode('utf-8')
            print '\n'

    else:
        print "Expected exactly one argument: the model file"
        exit(1)
Example no. 25
from providedcode.evaluate import DependencyEvaluator
from featureextractor import FeatureExtractor
from transition import Transition

if __name__ == '__main__':
    data = dataset.get_danish_train_corpus().parsed_sents()
    random.seed(1234)
    subdata = random.sample(data, 200)

    try:
        tp = TransitionParser(Transition, FeatureExtractor)
        tp.train(subdata)
        tp.save('danish.model')

        testdata = dataset.get_danish_test_corpus().parsed_sents()
        tp = TransitionParser.load('danish.model')

        parsed = tp.parse(testdata)

        with open('test.conll', 'w') as f:
            for p in parsed:
                f.write(p.to_conll(10).encode('utf-8'))
                f.write('\n')

        ev = DependencyEvaluator(testdata, parsed)
        # ev.eval() returns (uas, las), as unpacked in the grading code above
        print "UAS: {} \nLAS: {}".format(*ev.eval())

        # parsing arbitrary sentences (danish):
        sentence = DependencyGraph.from_sentence('Hi, this is a test')

        tp = TransitionParser.load('danish.model')
Example no. 26
    # data = dataset.get_danish_train_corpus().parsed_sents()
    random.seed(1234)
    subdata = random.sample(data, 400)

    try:
        tp = TransitionParser(Transition, FeatureExtractor)
        tp.train(subdata)
        # tp.save('swedish.model')
        tp.save('english.model')
        # tp.save('danish.model')

        # testdata = dataset.get_swedish_test_corpus().parsed_sents()
        testdata = dataset.get_english_test_corpus().parsed_sents()
        # testdata = dataset.get_danish_test_corpus().parsed_sents()
        # tp = TransitionParser.load('swedish.model')
        tp = TransitionParser.load('english.model')
        # tp = TransitionParser.load('danish.model')

        parsed = tp.parse(testdata)

        with open('test.conll', 'w') as f:
            for p in parsed:
                f.write(p.to_conll(10).encode('utf-8'))
                f.write('\n')

        ev = DependencyEvaluator(testdata, parsed)
        # ev.eval() returns (uas, las), as unpacked in the grading code above
        print "UAS: {} \nLAS: {}".format(*ev.eval())

        # parsing arbitrary sentences (english):
        # sentence = DependencyGraph.from_sentence('Hi, this is a test')
Example no. 27
        data, 200)  # use this subdata for bad features and swedish

    # NEED DANISH AND ENGLISH
    data_e = dataset.get_english_train_corpus().parsed_sents()
    random.seed(1234)
    subdata_e = random.sample(data_e, 200)

    data_d = dataset.get_danish_train_corpus().parsed_sents()
    random.seed(1234)
    subdata_d = random.sample(data_d, 200)

    try:
        # BAD FEATURES MODEL (SWEDISH DATA)
        print "Starting Bad Features"
        testdata = dataset.get_swedish_test_corpus().parsed_sents()
        tp = TransitionParser.load('badfeatures.model')

        parsed = tp.parse(testdata)

        with open('test.conll', 'w') as f:
            for p in parsed:
                f.write(p.to_conll(10).encode('utf-8'))
                f.write('\n')

        ev = DependencyEvaluator(testdata, parsed)
        print "Bad Features Results"
        print "UAS: {} \nLAS: {}".format(*ev.eval())
        t1 = time.time()
        print "Time: " + str(t1 - t0) + '\n'

        # SWEDISH FEATURE MODELS
Example no. 28
import sys
from providedcode.transitionparser import TransitionParser
from providedcode.dependencygraph import DependencyGraph

if __name__ == '__main__':
    sentences = sys.stdin.readlines()
    tp = TransitionParser.load(sys.argv[1])
    for sentence in sentences:
        dg = DependencyGraph.from_sentence(sentence) 
        parsed = tp.parse([dg])
        print parsed[0].to_conll(10).encode('utf-8')
        #print '\n'
Example no. 29
from providedcode.transitionparser import TransitionParser
from transition import Transition

if __name__ == '__main__':
    # print 'NLP Parse Program..'

    try:
        model_path = sys.argv[1]
        # print 'ModelPath', model_path
    except IndexError as ie:
        print 'Model Path Not Specified! Exiting...', ie
        sys.exit(-1)

    try:
        # load the trained model for parsing (no need to construct an untrained parser first)
        tp = TransitionParser.load(model_path)

        for line in sys.stdin:
            # print 'Processing:', line
            sentence = DependencyGraph.from_sentence(line)
            parsed = tp.parse([sentence]) # parse the input line
            print parsed[0].to_conll(10).encode('utf-8')

        # with open('test.conll', 'w') as f:
        #     for p in parsed:
        #         f.write(p.to_conll(10).encode('utf-8'))
        #         f.write('\n')

        # parsing arbitrary sentences (english):
        # sentence = DependencyGraph.from_sentence('Hi, this is a test')
Example no. 30
from providedcode.evaluate import DependencyEvaluator
from featureextractor import FeatureExtractor
from transition import Transition

if __name__ == "__main__":
    data = dataset.get_swedish_train_corpus().parsed_sents()
    random.seed(1234)
    subdata = random.sample(data, 200)

    try:
        # tp = TransitionParser(Transition, FeatureExtractor)
        # tp.train(subdata)
        # tp.save('swedish.model')

        testdata = dataset.get_swedish_test_corpus().parsed_sents()
        tp = TransitionParser.load("badfeatures.model")

        parsed = tp.parse(testdata)

        with open("test.conll", "w") as f:
            for p in parsed:
                f.write(p.to_conll(10).encode("utf-8"))
                f.write("\n")

        ev = DependencyEvaluator(testdata, parsed)
        # ev.eval() returns (uas, las), as unpacked in the grading code above
        print "UAS: {} \nLAS: {}".format(*ev.eval())

        # parsing arbitrary sentences (english):
        # sentence = DependencyGraph.from_sentence('Hi, this is a test')

        # tp = TransitionParser.load('english.model')
Example no. 31
def handle_input(input_file, model_file):
    tp = TransitionParser.load(model_file)
    for line in input_file:
        sentence = DependencyGraph.from_sentence(line)
        parsed = tp.parse([sentence])
        print parsed[0].to_conll(10).encode('utf-8')