def output(self, partId, ch_aux): """Uses the student code to compute the output for test cases.""" trainCorpus = HolbrookCorpus('../data/holbrook-tagged-train.dat') if partId in [1, 2]: editModel = EditModel('../data/count_1edit.txt', trainCorpus) return json.dumps([[(e.editedWord, e.rule()) for e in editModel.edits(line.strip())] for line in ch_aux.split("\n")]) else: testCorpus = HolbrookCorpus() testCorpus.slurpString(ch_aux) lm = None if partId in [3, 4]: lm = LaplaceUnigramLanguageModel(trainCorpus) elif partId in [5, 6]: lm = LaplaceBigramLanguageModel(trainCorpus) elif partId in [7, 8]: lm = StupidBackoffLanguageModel(trainCorpus) elif partId in [9, 10]: lm = CustomLanguageModel(trainCorpus) else: print 'Unknown partId: " + partId' return None speller = SpellCorrect(lm, trainCorpus) output = speller.correctCorpus(testCorpus) # put in the part ID as well output = '[["%d"],%s' % (partId, output[1:]) return output
def scan_edits(edits_file):
    """Builds a smoothed edit-probability model from a file of
    (error, correct) spelling pairs and serializes it to disk.

    Side effects: updates the module-level char_counts /
    char_bigram_counts tallies (via count_chars) and writes three
    pickled models: 'edit_model', 'char_unigram_model',
    'char_bigram_model'.
    """
    print >> sys.stderr, "Processing " + edits_file
    editmodel = EditModel('')
    edit_probs = Counter()
    edits1 = read_edit1s(edits_file)
    print >> sys.stderr, "Counting"
    for error, correct in edits1:
        count_chars(correct)
        v, edit_types = editmodel.get_edits(correct, error)
        # Deduplicate the edits for this pair and drop "no change"
        # entries before folding them into the counts.
        edit_types = set(edit_types)
        edit_types = [each for each in edit_types if each[0] != editmodel.nc]
        edit_probs.update(edit_types)
    num_char_unigrams = len(char_counts)
    print >> sys.stderr, "Normalizing"
    norm_edit_probs = {}
    # Bug fix: the loop variable was named 'str', shadowing the builtin;
    # renamed to 'chars'.  Iterating the Counter iterates its keys.
    for kind, chars in edit_probs:
        count = edit_probs[(kind, chars)]
        if kind == editmodel.dl:
            # Add-one smoothing, conditioned on the character bigram.
            norm_edit_probs[(kind, chars)] = (count + 1.0) / (
                get_char_bigram_count(chars) + num_char_unigrams + 1)
        elif kind == editmodel.ins:
            # Insertions are conditioned on the preceding character.
            norm_edit_probs[(kind, chars)] = (count + 1.0) / (
                get_char_unigram_count(chars[0]) + num_char_unigrams + 1)
        elif kind == editmodel.sub:
            # If this is a substitution, reverse the characters because
            # of a bug in get_edits.
            norm_edit_probs[(kind, chars[::-1])] = (count + 1.0) / (
                get_char_unigram_count(chars[0]) + num_char_unigrams + 1)
        elif kind == editmodel.trs:
            # Transpositions are conditioned on the character bigram.
            norm_edit_probs[(kind, chars)] = (count + 1.0) / (
                get_char_bigram_count(chars) + num_char_unigrams + 1)
    print >> sys.stderr, "Writing to file - edits_model"
    serialize_data(norm_edit_probs, 'edit_model')
    serialize_data(dict(char_counts), 'char_unigram_model')
    serialize_data(dict(char_bigram_counts), 'char_bigram_model')
def __init__(self, lm, corpus):
    """Stores the supplied language model and trains an edit model
    from the bundled count_1edit data and the given corpus."""
    self.editModel = EditModel('data/count_1edit.txt', corpus)
    self.languageModel = lm
def __init__(self, lm, corpus):
    """Initializes the corrector: keeps the language model and builds
    an edit model over the training corpus."""
    self.editModel = EditModel('../data/count_1edit.txt', corpus)
    self.languageModel = lm
# NOTE(review): this line is the tail of a function whose `def` lies before
# this chunk; `max` here is presumably a local score variable (shadowing the
# builtin) from that unseen function -- confirm against the full file.
return result, max

# Command-line driver: corrects a file of queries with the chosen edit
# model and prints one corrected query per line.
if __name__ == '__main__':
    if len(sys.argv) < 4:
        print "Usage: python corrector.py <dev | test> <uniform | empirical> <queries file>"
        exit(0)
    queries_file = sys.argv[3]
    queries, gold, google = read_query_data(queries_file)
    kind_of_editmodel = sys.argv[2]
    #Read in unigram and bigram probs
    print >> sys.stderr, "Loading language model"
    languagemodel = LanguageModel('unigram_model','bigram_model')
    print >> sys.stderr, "Loading edit model"
    editmodel = EditModel(kind_of_editmodel,languagemodel)
    languagemodel.init_edit_model(editmodel)
    print >> sys.stderr,"Loading spell correct"
    spell_corrector = SpellCorrect(languagemodel, editmodel)
    answers = []
    qc = 0
    # Correct each query; the answer goes to stdout, progress to stderr.
    for eachquery in queries:
        answer = spell_corrector.spell_correct_query(eachquery)
        print answer
        print >> sys.stderr, "%d" % (qc)
        qc+=1
        answers.append(answer)
    #Accuracy evaluation
    wrong = 0
    correct = 0
    # NOTE(review): the body of this loop is cut off at the chunk boundary;
    # it presumably compares answers[i] against the gold labels.
    for i in range(len(answers)):
def __init__(self, lm, corpus):
    """Keeps the given language model and constructs the edit model
    from the local count_1edit data file."""
    self.editModel = EditModel("./data/count_1edit.txt", corpus)
    self.lm = lm