def main(): """Trains all of the language models and tests them on the dev data. Change devPath if you wish to do things like test on the training data. """ trainPath = '../data/micro/en_US/' trainingCorpus = CapstoneCorpus(trainPath) #print str(trainingCorpus) sent = "When you breathe, I want to be the air for you. I'll be there for you, I'd live and I'd" tokens = Tokenize(sent) print 'Uniform Language Model: ' uniformLM = UniformLanguageModel(trainingCorpus) print "VocSize= " + str(len(uniformLM.words)) print sent print tokens print "uniform score=" + str(uniformLM.score(tokens)) print 'Unigram Language Model: ' unigramLM = UnigramLanguageModel(trainingCorpus) print "VocSize= " + str(len(unigramLM.unigramCounts)) print "unigram score=" + str(unigramLM.score(tokens)) print 'Laplace Unigram Language Model: ' laplaceUnigramLM = LaplaceUnigramLanguageModel(trainingCorpus) laplaceUnigramLM.save("smallUnigram.LM") print "VocSize= " + str(len(laplaceUnigramLM.f1)) print "unigram score=" + str(laplaceUnigramLM.score(tokens)) print 'Laplace Bigram Language Model: ' laplaceBigramLM = LaplaceBigramLanguageModel(trainingCorpus) laplaceBigramLM.save("smallBigram.LM") print "bigram score=" + str(laplaceBigramLM.score(tokens)) print 'Laplace Ngram Language Model: N=2' laplaceN2gramLM = LaplaceNgramLanguageModel(trainingCorpus,2) laplaceN2gramLM.save("smallN2gram.LM") print "N=2gram score=" + str(laplaceN2gramLM.score(tokens)) print 'Laplace Ngram Language Model: N=3' laplaceN3gramLM = LaplaceNgramLanguageModel(trainingCorpus,3) laplaceN3gramLM.save("smallN3gram.LM") print "N=3gram score=" + str(laplaceN2gramLM.score(tokens)) print 'Custom Language Model: ' customLM = CustomLanguageModel(trainingCorpus,N=2) print "Custom LM score=" + str(customLM.score(tokens))
def main(): """Trains all of the language models and tests them on the dev data. Change devPath if you wish to do things like test on the training data. """ trainPath = '../data/micro/en_US/' trainingCorpus = CapstoneCorpus(trainPath) #print str(trainingCorpus) sent = "When you breathe, I want to be the air for you. I'll be there for you, I'd live and I'd" tokens = Tokenize(sent) print 'Uniform Language Model: ' uniformLM = UniformLanguageModel(trainingCorpus) print "VocSize= " + str(len(uniformLM.words)) print sent print tokens print "uniform score=" + str(uniformLM.score(tokens)) print 'Unigram Language Model: ' unigramLM = UnigramLanguageModel(trainingCorpus) print "VocSize= " + str(len(unigramLM.unigramCounts)) print "unigram score=" + str(unigramLM.score(tokens)) print 'Laplace Unigram Language Model: ' laplaceUnigramLM = LaplaceUnigramLanguageModel(trainingCorpus) laplaceUnigramLM.save("smallUnigram.LM") print "VocSize= " + str(len(laplaceUnigramLM.f1)) print "unigram score=" + str(laplaceUnigramLM.score(tokens)) print 'Laplace Bigram Language Model: ' laplaceBigramLM = LaplaceBigramLanguageModel(trainingCorpus) laplaceBigramLM.save("smallBigram.LM") print "bigram score=" + str(laplaceBigramLM.score(tokens)) print 'Laplace Ngram Language Model: N=2' laplaceN2gramLM = LaplaceNgramLanguageModel(trainingCorpus, 2) laplaceN2gramLM.save("smallN2gram.LM") print "N=2gram score=" + str(laplaceN2gramLM.score(tokens)) print 'Laplace Ngram Language Model: N=3' laplaceN3gramLM = LaplaceNgramLanguageModel(trainingCorpus, 3) laplaceN3gramLM.save("smallN3gram.LM") print "N=3gram score=" + str(laplaceN2gramLM.score(tokens)) print 'Custom Language Model: ' customLM = CustomLanguageModel(trainingCorpus, N=2) print "Custom LM score=" + str(customLM.score(tokens))
def main(): """Trains all of the language models and tests them on the dev data. Change devPath if you wish to do things like test on the training data.""" trainPath = '../data/holbrook-tagged-train.dat' trainingCorpus = HolbrookCorpus(trainPath) devPath = '../data/holbrook-tagged-dev.dat' devCorpus = HolbrookCorpus(devPath) print 'Unigram Language Model: ' unigramLM = UnigramLanguageModel(trainingCorpus) unigramSpell = SpellCorrect(unigramLM, trainingCorpus) unigramOutcome = unigramSpell.evaluate(devCorpus) print str(unigramOutcome) print 'Uniform Language Model: ' uniformLM = UniformLanguageModel(trainingCorpus) uniformSpell = SpellCorrect(uniformLM, trainingCorpus) uniformOutcome = uniformSpell.evaluate(devCorpus) print str(uniformOutcome) print 'Laplace Unigram Language Model: ' laplaceUnigramLM = LaplaceUnigramLanguageModel(trainingCorpus) laplaceUnigramSpell = SpellCorrect(laplaceUnigramLM, trainingCorpus) laplaceUnigramOutcome = laplaceUnigramSpell.evaluate(devCorpus) print str(laplaceUnigramOutcome) print 'Laplace Bigram Language Model: ' laplaceBigramLM = LaplaceBigramLanguageModel(trainingCorpus) laplaceBigramSpell = SpellCorrect(laplaceBigramLM, trainingCorpus) laplaceBigramOutcome = laplaceBigramSpell.evaluate(devCorpus) print str(laplaceBigramOutcome) print 'Stupid Backoff Language Model: ' sbLM = StupidBackoffLanguageModel(trainingCorpus) sbSpell = SpellCorrect(sbLM, trainingCorpus) sbOutcome = sbSpell.evaluate(devCorpus) print str(sbOutcome) print 'Custom Language Model (based on LaplaceBigramLanguageModel): ' customLM = CustomLanguageModel(trainingCorpus) customSpell = SpellCorrect(customLM, trainingCorpus) customOutcome = customSpell.evaluate(devCorpus) print str(customOutcome) print 'Custom Language Model2 (based on StupidBackoffLanguageModel): ' customLM2 = CustomLanguageModel2(trainingCorpus) customSpell2 = SpellCorrect(customLM2, trainingCorpus) customOutcome2 = customSpell2.evaluate(devCorpus) print str(customOutcome2)
def main(): """Trains all of the language models and tests them on the dev data. Change devPath if you wish to do things like test on the training data.""" trainPath = '../data/holbrook-tagged-train.dat' trainingCorpus = HolbrookCorpus(trainPath) devPath = '../data/holbrook-tagged-dev.dat' devCorpus = HolbrookCorpus(devPath) print 'Uniform Language Model: ' uniformLM = UniformLanguageModel(trainingCorpus) uniformSpell = SpellCorrect(uniformLM, trainingCorpus) uniformOutcome = uniformSpell.evaluate(devCorpus) print str(uniformOutcome), '\n' print 'Laplace Unigram Language Model: ' laplaceUnigramLM = LaplaceUnigramLanguageModel(trainingCorpus) laplaceUnigramSpell = SpellCorrect(laplaceUnigramLM, trainingCorpus) laplaceUnigramOutcome = laplaceUnigramSpell.evaluate(devCorpus) print str(laplaceUnigramOutcome), '\n' #It has (accuracy: 0.012739) because of the small corpus (I think ^_^) print 'Good-Turing Unigram Language Model: ' GoodTuringLM = GoodTuringUnigramLanguageModel(trainingCorpus) GoodTuringSpell = SpellCorrect(GoodTuringLM, trainingCorpus) GoodTuringOutcome = GoodTuringSpell.evaluate(devCorpus) print str(GoodTuringOutcome), '\n' #This model takes some time, about (70) seconds print 'Laplace Bigram Language Model: ' laplaceBigramLM = LaplaceBigramLanguageModel(trainingCorpus) laplaceBigramSpell = SpellCorrect(laplaceBigramLM, trainingCorpus) laplaceBigramOutcome = laplaceBigramSpell.evaluate(devCorpus) print str(laplaceBigramOutcome), '\n' #This model takes some time, about (70) seconds print 'Stupid Backoff Language Model: ' sbLM = StupidBackoffLanguageModel(trainingCorpus) sbSpell = SpellCorrect(sbLM, trainingCorpus) sbOutcome = sbSpell.evaluate(devCorpus) print str(sbOutcome), '\n' #This model takes some time, about (70) seconds print 'Custom Language Model: ' customLM = CustomLanguageModel(trainingCorpus) customSpell = SpellCorrect(customLM, trainingCorpus) customOutcome = customSpell.evaluate(devCorpus) print str(customOutcome), '\n'
def main():
    """Trains all of the language models and tests them on the dev data.
    Change devPath if you wish to do things like test on the training data."""
    trainPath = '../data/holbrook_tagged_train.dat'
    trainingCorpus = HolbrookCorpus(trainPath)
    devPath = '../data/holbrook_tagged_dev.dat'
    devCorpus = HolbrookCorpus(devPath)

    # (header, model class, print a separator line afterwards?) — evaluated
    # in order; the last two sections have no trailing separator.
    modelSpecs = [
        ('Uniform Language Model: ', UniformLanguageModel, True),
        ('Laplace Unigram Language Model: ', LaplaceUnigramLanguageModel, True),
        ('Laplace Bigram Language Model: ', LaplaceBigramLanguageModel, True),
        ('Stupid Backoff Language Model: ', StupidBackoffLanguageModel, False),
        ('Custom Language Model: ', CustomLanguageModel, False),
    ]

    for header, modelClass, wantSeparator in modelSpecs:
        print(header)
        languageModel = modelClass(trainingCorpus)
        corrector = SpellCorrect(languageModel, trainingCorpus)
        outcome = corrector.evaluate(devCorpus)
        print(str(outcome))
        if wantSeparator:
            print("=================================================")