Example #1
File: parse.py Project: devintjones/NLPhw
def main():
	try:
		sentences = sys.stdin.readlines()
		model_file = sys.argv[1]
	except IndexError:
		raise ValueError('''Usage: cat <file of sentences> | python parse.py <model_file>
		or: python parse.py <model_file>, then type sentences and press Ctrl+D''')
	
	if not os.path.isfile(model_file):
		raise ValueError("can't find the model file")
	
	# scrub list / remove line breaks
	sentences = [sent.rstrip() for sent in sentences]

	# generate dependency graph object from sentences
	depgraphs = [DependencyGraph.from_sentence(sent) for sent in sentences]

	# load model and parse
	tp = TransitionParser.load(model_file)
	parsed = tp.parse(depgraphs)
	
	# print to stdout. 
	# can cat this to a conll file for viewing with MaltEval
	for p in parsed:
		print(p.to_conll(10).encode('utf-8'))
	
	return
Example #2
def main():
    file_to_parse = sys.stdin
    sentences_list = [s for s in file_to_parse]
    file_to_parse.close()

    lang_model = sys.argv[1]
    tp = TransitionParser.load(lang_model)

    sentences = [DependencyGraph.from_sentence(s) for s in sentences_list]
    parsed = tp.parse(sentences)
    for p in parsed:
        print p.to_conll(10).encode('utf-8')
Example #3
File: parse.py Project: actondong/NLP
def parse(argv):
    if len(argv) != 2:
        sys.exit("Usage: python parse.py language.model")
#    data = dataset.get_english_train_corpus().parsed_sents()
#    random.seed(1234)
#    subdata = random.sample(data, 200)
    language_model = argv[1]
    try:
        sentences = sys.stdin.readlines()
        for i, sentence in enumerate(sentences):
            dg = DependencyGraph.from_sentence(sentence)
            tp = TransitionParser.load(language_model)
            parsed = tp.parse([dg])
            print parsed[0].to_conll(10).encode('utf-8')
#           tp = TransitionParser(Transition, FeatureExtractor)
#           tp.train(subdata)
#           tp.save('english.model')
#           testdata = dataset.get_swedish_test_corpus().parsed_sents()
#           tp = TransitionParser.load('english.model')
#           parsed = tp.parse(testdata)
            # open a new file for writing on the first sentence
            if i == 0:
                with open('test.conll', 'w') as f:
                    for p in parsed:
                        f.write(p.to_conll(10).encode('utf-8'))
                        f.write('\n')
            # append the remaining sentences
            else:
                with open('test.conll', 'a') as f:
                    for p in parsed:
                        f.write(p.to_conll(10).encode('utf-8'))
                        f.write('\n')

#        ev = DependencyEvaluator(testdata, parsed)
#        print "UAS: {} \nLAS: {}".format(*ev.eval())

    except NotImplementedError:
        print """

Example #4
from providedcode import dataset
import sys
from providedcode.transitionparser import TransitionParser
from providedcode.dependencygraph import DependencyGraph

if __name__ == '__main__':

    testdata = []
    for line in sys.stdin:
        sentence = DependencyGraph.from_sentence(line)
        testdata.append(sentence)
    model = sys.argv[1]

    tp = TransitionParser.load(model)
    parsed = tp.parse(testdata)

    for p in parsed:
        print(p.to_conll(10).encode('utf-8'))
Example #5
File: test.py Project: parasmehta/nlpintro
        # testdata = dataset.get_swedish_test_corpus().parsed_sents()
        # tp = TransitionParser.load('badfeatures.model')

        #parsed = tp.parse(testdata)

        #with open('test.conll', 'w') as f:
        #    for p in parsed:
        #        f.write(p.to_conll(10).encode('utf-8'))
        #        f.write('\n')

        #ev = DependencyEvaluator(testdata, parsed)
        #print "LAS: {} \nUAS: {}".format(*ev.eval())

        # parsing arbitrary sentences (english):
        sentence = DependencyGraph.from_sentence('Hi, this is a test')
        print sentence

        print "model loading"
        tp = TransitionParser.load('badfeatures.model')
        print "model loaded"
        print "sentence parsing"
        parsed = tp.parse([sentence])
        print "sentence parsed"
        print parsed[0].to_conll(10).encode('utf-8')
    except NotImplementedError:
        print """
        This file is currently broken! We removed the implementation of Transition
        (in transition.py), which tells the transitionparser how to go from one
        Configuration to another Configuration. This is an essential part of the
        arc-eager dependency parsing algorithm, so you should probably fix that :)
Example #6
File: test.py Project: Alexoner/mooc
        # tp = TransitionParser.load('badfeatures.model')
        # testdata = dataset.get_english_test_corpus().parsed_sents()
        # tp = TransitionParser.load('english.model')

        parsed = tp.parse(testdata)

        with open('test.conll', 'w') as f:
            for p in parsed:
                f.write(p.to_conll(10).encode('utf-8'))
                f.write('\n')

        ev = DependencyEvaluator(testdata, parsed)
        print "LAS: {} \nUAS: {}".format(*ev.eval())

        # parsing arbitrary sentences (english):
        sentence = DependencyGraph.from_sentence('Hi, this is a test')

        tp = TransitionParser.load('english.model')
        parsed = tp.parse([sentence])
        print parsed[0].to_conll(10).encode('utf-8')
    except NotImplementedError:
        print """
        This file is currently broken! We removed the implementation of Transition
        (in transition.py), which tells the transitionparser how to go from one
        Configuration to another Configuration. This is an essential part of the
        arc-eager dependency parsing algorithm, so you should probably fix that :)

        The algorithm is described in great detail here:
            http://aclweb.org/anthology//C/C12/C12-1059.pdf

        We also haven't actually implemented most of the features for the
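The error message quoted in these examples describes the arc-eager transition system: a Transition object moves the parser from one Configuration (a stack, an input buffer, and the partial arc set) to the next. The sketch below is only an illustration of those four operations; the class and method names are placeholders, not the actual providedcode API.

# Illustrative sketch only: Configuration and Transition here are stand-ins,
# not the classes from providedcode/transition.py.
class Configuration(object):
    def __init__(self, n_words):
        self.stack = [0]                           # index 0 is the artificial ROOT
        self.buffer = list(range(1, n_words + 1))  # remaining input word indices
        self.arcs = []                             # (head, label, dependent) triples

    def has_head(self, idx):
        return any(dep == idx for (_, _, dep) in self.arcs)


class Transition(object):
    @staticmethod
    def shift(conf):
        # push the first buffer word onto the stack
        conf.stack.append(conf.buffer.pop(0))

    @staticmethod
    def left_arc(conf, label):
        # make the buffer front the head of the stack top, then pop the stack;
        # only legal if the stack top is not ROOT and has no head yet
        s, b = conf.stack[-1], conf.buffer[0]
        if s != 0 and not conf.has_head(s):
            conf.arcs.append((b, label, s))
            conf.stack.pop()

    @staticmethod
    def right_arc(conf, label):
        # make the stack top the head of the buffer front, then shift that word
        s, b = conf.stack[-1], conf.buffer[0]
        conf.arcs.append((s, label, b))
        conf.stack.append(conf.buffer.pop(0))

    @staticmethod
    def reduce(conf):
        # pop the stack top once it has received a head
        if conf.has_head(conf.stack[-1]):
            conf.stack.pop()

A trained parser chooses which of these operations to apply at each configuration with a classifier over the extracted features, which is what tp.parse does with the loaded model.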
Example #7
def handle_input(input_file, model_file):
    tp = TransitionParser.load(model_file)
    for line in input_file:
        sentence = DependencyGraph.from_sentence(line)
        parsed = tp.parse([sentence])
        print parsed[0].to_conll(10).encode('utf-8')
Example #8
File: parse.py Project: adamsachs/NLP
import sys
from providedcode.transitionparser import TransitionParser
from providedcode.dependencygraph import DependencyGraph

if __name__ == '__main__':
    sentences = sys.stdin.readlines()
    tp = TransitionParser.load(sys.argv[1])
    for sentence in sentences:
        dg = DependencyGraph.from_sentence(sentence) 
        parsed = tp.parse([dg])
        print parsed[0].to_conll(10).encode('utf-8')
        #print '\n'
Example #9
import sys
from providedcode.transitionparser import TransitionParser
from providedcode.dependencygraph import DependencyGraph

if __name__ == '__main__':
    # the raw sentences read from englishfile
    lines = sys.stdin

    # if the model argument is missing, exit the program.
    if len(sys.argv) != 2:
        exit()
    # turn each raw sentence into a dependency graph and collect the graphs in a list.
    sentences = [DependencyGraph.from_sentence(line) for line in lines]

    model_name = sys.argv[1]

    # load the trained model
    tp = TransitionParser.load(model_name)

    # parse the sentences with the model
    parsed = tp.parse(sentences)

    # print the parsed sentences to stdout in CoNLL format.
    for parsed_line in parsed:
        print parsed_line.to_conll(10).encode('utf-8')

    #sentence = DependencyGraph.from_sentence('Hi, this is a test')
    #tp = TransitionParser.load('english.model')
    #parsed = tp.parse([sentence])
    #print parsed[0].to_conll(10).encode('utf-8')
Example #10
import random
from providedcode.dependencygraph import DependencyGraph
from providedcode import dataset
from providedcode.transitionparser import TransitionParser
from providedcode.evaluate import DependencyEvaluator
from featureextractor import FeatureExtractor
from transition import Transition
import sys

if __name__ == "__main__":
    try:
        # parsing arbitrary sentences (english):
        fromInput = "".join(sys.stdin.readlines())
        # print fromInput
        sentence = DependencyGraph.from_sentence(fromInput)

        tp = TransitionParser.load("english.model")
        parsed = tp.parse([sentence])
        print parsed[0].to_conll(10).encode("utf-8")
    except NotImplementedError:
        print """
        This file is currently broken! We removed the implementation of Transition
        (in transition.py), which tells the transitionparser how to go from one
        Configuration to another Configuration. This is an essential part of the
        arc-eager dependency parsing algorithm, so you should probably fix that :)

        The algorithm is described in great detail here:
            http://aclweb.org/anthology//C/C12/C12-1059.pdf

        We also haven't actually implemented most of the features for the
        support vector machine (in featureextractor.py), so as you might expect the
Example #11
File: parse.py Project: keyu-lai/NLP
import sys
from providedcode.transitionparser import TransitionParser
from providedcode.evaluate import DependencyEvaluator
from featureextractor import FeatureExtractor
from providedcode.dependencygraph import DependencyGraph
from transition import Transition

if __name__ == "__main__":
    sentences = []
    try:
        while 1:
            sentence = raw_input().strip()
            sentences.append(DependencyGraph.from_sentence(sentence))
    except EOFError:
        pass

    tp = TransitionParser.load(sys.argv[1])
    parsed = tp.parse(sentences)

    for p in parsed:
        print p.to_conll(10).encode("utf-8")
Example #12
File: parse.py Project: chewpin/hw2
import sys
import nltk
from providedcode.transitionparser import TransitionParser
from providedcode.dependencygraph import DependencyGraph

if __name__ == '__main__':

    try:
        model = sys.argv[1]
        tp = TransitionParser.load(model)

        for line in sys.stdin:
            # temp = line.strip()
            # temp = str(temp)
            # parsing arbitrary sentences (english):
            # print "[" + temp + "]"
            temp = line
            # temp = "Hi, this is a test."

            sentence = DependencyGraph.from_sentence(temp)

            for key, dct in sentence.nodes.items():
                dct['ctag'] = nltk.tag.mapping.map_tag("en-ptb", "universal", dct['ctag'])

            parsed = tp.parse([sentence])
            print parsed[0].to_conll(10).encode('utf-8')
            
    except NotImplementedError:
        print """
        This file is currently broken! We removed the implementation of Transition
        (in transition.py), which tells the transitionparser how to go from one
        Configuration to another Configuration. This is an essential part of the
        arc-eager dependency parsing algorithm, so you should probably fix that :)

        The algorithm is described in great detail here:
Example #13
import sys
from providedcode.transitionparser import TransitionParser
from providedcode.evaluate import DependencyEvaluator
from providedcode.dependencygraph import DependencyGraph
from nltk.tag import mapping

if len(sys.argv) != 2:
    sys.stderr.write("No model provided.\n")
    sys.exit(1)

tp = TransitionParser.load(sys.argv[1])

for sentence in sys.stdin:
    # DependencyGraph.from_sentence builds a graph from the raw sentence
    s = DependencyGraph.from_sentence(sentence)
    for node in s.nodes:
        tag = s.nodes[node]['tag']
        ctag = mapping.map_tag('wsj', 'universal', tag)
        s.nodes[node]['ctag'] = ctag
    x = tp.parse([s])
    print x[0].to_conll(10).encode('utf-8')

# model: sys.argv[1] - english.model
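Examples #12 and #13 both remap Penn Treebank POS tags onto the universal tagset before parsing. A minimal illustration of that NLTK call follows; the source tagset name ('en-ptb' here, 'wsj' above) depends on the tag data, and the mapping needs the universal_tagset resource.

from nltk.tag import map_tag  # requires the data package: nltk.download('universal_tagset')

# map_tag(source, target, source_tag) converts a single tag between tagsets,
# here from the Penn Treebank tagset ('en-ptb') to the universal tagset.
print(map_tag('en-ptb', 'universal', 'NNS'))  # NOUN
print(map_tag('en-ptb', 'universal', 'VBZ'))  # VERB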
Example #14
def handle_input(input_file, model_file):
    tp = TransitionParser.load(model_file)
    for line in input_file:
        sentence = DependencyGraph.from_sentence(line)
        parsed = tp.parse([sentence])
        print parsed[0].to_conll(10).encode('utf-8')
Example #15
File: parse.py Project: asubhangi/NLP
import sys
from providedcode.transitionparser import TransitionParser
from providedcode.dependencygraph import DependencyGraph

if __name__ == '__main__':
    model = sys.argv[1]
    data = sys.stdin.readlines()
    tp = TransitionParser.load(model)  # load the model once, not once per sentence
    for item in data:
        sentence = DependencyGraph.from_sentence(item)
        parsed = tp.parse([sentence])
        print parsed[0].to_conll(10).encode('utf-8')
        sys.stdout.flush()
Example #16
__author__ = 'johnfulgoni'

import sys
from providedcode.dependencygraph import DependencyGraph
from providedcode.transitionparser import TransitionParser

# DON'T PRINT ANYTHING! OR ELSE IT MESSES THINGS UP

if __name__ == '__main__':
    argc = len(sys.argv)
    if argc == 2:
        #print sys.argv[1] # just to see

        sentence_list = []
        for sent in sys.stdin:  # get the sentences from the englishfile
            sentence = DependencyGraph.from_sentence(sent)
            sentence_list.append(sentence)

        my_model = sys.argv[1]  # should be 'english.model'
        tp = TransitionParser.load(my_model)
        parsed = tp.parse(sentence_list)

        # following the example in test.py
        # but we're not writing it to a file
        for p in parsed:
            print p.to_conll(10).encode('utf-8')
            print '\n'

    else:
        print "Usage: python parse.py <model_file>"
        exit(1)
Example #17
import providedcode
from providedcode.transitionparser import TransitionParser
from providedcode.dependencygraph import DependencyGraph
from providedcode.evaluate import DependencyEvaluator
import sys

tp = TransitionParser.load('english.model')

for line in sys.stdin:
    sentence = DependencyGraph.from_sentence(line)
    parsed = tp.parse([sentence])
    print parsed[0].to_conll(10).encode('utf-8')
Example #18
import sys
from providedcode.transitionparser import TransitionParser
from providedcode import dataset
from providedcode.dependencygraph import DependencyGraph

englishfile = sys.stdin.read()
lines = englishfile.split('\n')

model = sys.argv[1]

tp = TransitionParser.load(model)


for line in lines:
    if not line.strip():
        continue  # skip the empty string produced by the trailing newline
    sentence = DependencyGraph.from_sentence(line.strip())
    parsed = tp.parse([sentence])
    print parsed[0].to_conll(10).encode('utf-8')
    print  # blank line between sentences
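Every example above follows the same pipeline: read sentences from stdin, build DependencyGraph objects, load a trained TransitionParser, parse, and print 10-column CoNLL output. Assuming that same providedcode API, and assuming to_conll(10) returns a plain str under Python 3, a Python 3 variant of the pattern might look like this sketch:

import sys
from providedcode.transitionparser import TransitionParser
from providedcode.dependencygraph import DependencyGraph

def main():
    if len(sys.argv) != 2:
        sys.exit("Usage: cat <sentences> | python parse.py <model_file>")

    # one DependencyGraph per non-empty input line
    graphs = [DependencyGraph.from_sentence(line.strip())
              for line in sys.stdin if line.strip()]

    tp = TransitionParser.load(sys.argv[1])  # load the trained model once
    for parsed in tp.parse(graphs):
        # assuming to_conll(10) returns a str in Python 3, print it directly
        print(parsed.to_conll(10))

if __name__ == '__main__':
    main()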