def hillclimb(self, alignment_info, j_pegged=None):
    """
    Starting from ``alignment_info``, repeatedly move to the best
    neighboring alignment until no neighbor scores higher.

    Note that Model 4 scoring is used instead of Model 5 because the
    latter is too expensive to compute.

    There is no guarantee that the best alignment in the alignment
    space will be found, because the algorithm might be stuck in a
    local maximum.

    :param j_pegged: If specified, the search will be constrained to
        alignments where ``j_pegged`` remains unchanged
    :type j_pegged: int

    :return: The best alignment found from hill climbing
    :rtype: AlignmentInfo
    """
    best = alignment_info
    best_probability = IBMModel4.model4_prob_t_a_given_s(best, self)

    improved = True
    while improved:
        # Keep climbing as long as some neighbor strictly improves
        # on the current best score.
        improved = False
        for candidate in self.neighboring(best, j_pegged):
            candidate_probability = IBMModel4.model4_prob_t_a_given_s(
                candidate, self
            )
            if candidate_probability > best_probability:
                best = candidate
                best_probability = candidate_probability
                improved = True

    best.score = best_probability
    return best
def prune(self, alignment_infos):
    """
    Removes alignments from ``alignment_infos`` that have
    substantially lower Model 4 scores than the best alignment

    :return: Pruned alignments
    :rtype: set(AlignmentInfo)
    """
    # Score every candidate once, then filter against a threshold
    # derived from the best score seen.
    scored = [
        (a, IBMModel4.model4_prob_t_a_given_s(a, self))
        for a in alignment_infos
    ]
    best_score = max((score for _, score in scored), default=0)
    threshold = IBMModel5.MIN_SCORE_FACTOR * best_score
    return {a for a, score in scored if score > threshold}
def prune(self, alignment_infos):
    """
    Discard candidate alignments whose Model 4 score falls well below
    that of the best candidate.

    :return: Pruned alignments
    :rtype: set(AlignmentInfo)
    """
    best_score = 0
    scored_alignments = []
    for candidate in alignment_infos:
        candidate_score = IBMModel4.model4_prob_t_a_given_s(candidate, self)
        if candidate_score > best_score:
            best_score = candidate_score
        scored_alignments.append((candidate, candidate_score))

    # Anything scoring below this fraction of the best is dropped.
    cutoff = IBMModel5.MIN_SCORE_FACTOR * best_score
    return set(
        alignment
        for alignment, score in scored_alignments
        if score > cutoff
    )
def test_set_uniform_distortion_probabilities_of_max_displacements(self):
    # arrange
    source_classes = {"schinken": 0, "eier": 0, "spam": 1}
    target_classes = {"ham": 0, "eggs": 1, "spam": 2}
    training_corpus = [
        AlignedSent(["ham", "eggs"], ["schinken", "schinken", "eier"]),
        AlignedSent(["spam", "spam", "spam", "spam"], ["spam", "spam"]),
    ]
    model = IBMModel4(training_corpus, 0, source_classes, target_classes)

    # act
    model.set_uniform_probabilities(training_corpus)

    # assert
    # The longest target sentence has 4 words, so the displacement
    # domain has 2 * (4 - 1) values and each gets uniform mass.
    expected_prob = 1.0 / (2 * (4 - 1))

    # check the boundary displacements for (displacement, src_class, trg_class)
    self.assertEqual(model.head_distortion_table[3][0][0], expected_prob)
    self.assertEqual(model.head_distortion_table[-3][1][2], expected_prob)
    self.assertEqual(model.non_head_distortion_table[3][0], expected_prob)
    self.assertEqual(model.non_head_distortion_table[-3][2], expected_prob)
def test_set_uniform_distortion_probabilities_of_non_domain_values(self):
    # arrange
    source_classes = {"schinken": 0, "eier": 0, "spam": 1}
    target_classes = {"ham": 0, "eggs": 1, "spam": 2}
    training_corpus = [
        AlignedSent(["ham", "eggs"], ["schinken", "schinken", "eier"]),
        AlignedSent(["spam", "spam", "spam", "spam"], ["spam", "spam"]),
    ]
    model = IBMModel4(training_corpus, 0, source_classes, target_classes)

    # act
    model.set_uniform_probabilities(training_corpus)

    # assert
    # Displacements outside the training data domain should fall back
    # to the minimum probability rather than the uniform value.
    self.assertEqual(model.head_distortion_table[4][0][0], IBMModel.MIN_PROB)
    self.assertEqual(model.head_distortion_table[100][1][2], IBMModel.MIN_PROB)
    self.assertEqual(model.non_head_distortion_table[4][0], IBMModel.MIN_PROB)
    self.assertEqual(model.non_head_distortion_table[100][2], IBMModel.MIN_PROB)
def __init__(self, sentence_aligned_corpus, iterations,
             source_word_classes, target_word_classes,
             probability_tables=None):
    """
    Train on ``sentence_aligned_corpus`` and create a lexical
    translation model, vacancy models, a fertility model, and a model
    for generating NULL-aligned words.

    Translation direction is from ``AlignedSent.mots`` to
    ``AlignedSent.words``.

    :param sentence_aligned_corpus: Sentence-aligned parallel corpus
    :type sentence_aligned_corpus: list(AlignedSent)

    :param iterations: Number of iterations to run training algorithm
    :type iterations: int

    :param source_word_classes: Lookup table that maps a source word to
        its word class, the latter represented by an integer id
    :type source_word_classes: dict[str]: int

    :param target_word_classes: Lookup table that maps a target word to
        its word class, the latter represented by an integer id
    :type target_word_classes: dict[str]: int

    :param probability_tables: Optional. Use this to pass in custom
        probability values. If not specified, probabilities will be
        set to a uniform distribution, or some other sensible value.
        If specified, all of the following entries must be present:
        ``translation_table``, ``alignment_table``,
        ``fertility_table``, ``p1``, ``head_distortion_table``,
        ``non_head_distortion_table``, ``head_vacancy_table``,
        ``non_head_vacancy_table``. See ``IBMModel``, ``IBMModel4``,
        and ``IBMModel5`` for the type and purpose of these tables.
    :type probability_tables: dict[str]: object
    """
    super(IBMModel5, self).__init__(sentence_aligned_corpus)
    self.reset_probabilities()
    self.src_classes = source_word_classes
    self.trg_classes = target_word_classes

    if probability_tables is None:
        # Bootstrap from a trained Model 4, then initialise the
        # vacancy tables with a sensible uniform distribution.
        ibm4 = IBMModel4(sentence_aligned_corpus, iterations,
                         source_word_classes, target_word_classes)
        self.translation_table = ibm4.translation_table
        self.alignment_table = ibm4.alignment_table
        self.fertility_table = ibm4.fertility_table
        self.p1 = ibm4.p1
        self.head_distortion_table = ibm4.head_distortion_table
        self.non_head_distortion_table = ibm4.non_head_distortion_table
        self.set_uniform_probabilities(sentence_aligned_corpus)
    else:
        # Use caller-supplied probability tables verbatim.
        tables = probability_tables
        self.translation_table = tables['translation_table']
        self.alignment_table = tables['alignment_table']
        self.fertility_table = tables['fertility_table']
        self.p1 = tables['p1']
        self.head_distortion_table = tables['head_distortion_table']
        self.non_head_distortion_table = tables['non_head_distortion_table']
        self.head_vacancy_table = tables['head_vacancy_table']
        self.non_head_vacancy_table = tables['non_head_vacancy_table']

    for _ in range(iterations):
        self.train(sentence_aligned_corpus)
def test_prob_t_a_given_s(self):
    # arrange
    src_sentence = ["ich", "esse", "ja", "gern", "räucherschinken"]
    trg_sentence = ["i", "love", "to", "eat", "smoked", "ham"]
    src_classes = {"räucherschinken": 0, "ja": 1, "ich": 2, "esse": 3, "gern": 4}
    trg_classes = {"ham": 0, "smoked": 1, "i": 3, "love": 4, "to": 2, "eat": 4}
    corpus = [AlignedSent(trg_sentence, src_sentence)]
    alignment_info = AlignmentInfo(
        (0, 1, 4, 0, 2, 5, 5),
        [None] + src_sentence,
        ["UNUSED"] + trg_sentence,
        [[3], [1], [4], [], [2], [5, 6]],
    )

    head_distortion_table = defaultdict(
        lambda: defaultdict(lambda: defaultdict(float))
    )
    head_distortion_table[1][None][3] = 0.97  # None, i
    head_distortion_table[3][2][4] = 0.97  # ich, eat
    head_distortion_table[-2][3][4] = 0.97  # esse, love
    head_distortion_table[3][4][1] = 0.97  # gern, smoked

    non_head_distortion_table = defaultdict(lambda: defaultdict(float))
    non_head_distortion_table[1][0] = 0.96  # ham

    # All lexical translation entries share the same probability.
    translation_table = defaultdict(lambda: defaultdict(float))
    for trg_word, src_word in [
        ("i", "ich"),
        ("love", "gern"),
        ("to", None),
        ("eat", "esse"),
        ("smoked", "räucherschinken"),
        ("ham", "räucherschinken"),
    ]:
        translation_table[trg_word][src_word] = 0.98

    fertility_table = defaultdict(lambda: defaultdict(float))
    fertility_table[1]["ich"] = 0.99
    fertility_table[1]["esse"] = 0.99
    fertility_table[0]["ja"] = 0.99
    fertility_table[1]["gern"] = 0.99
    fertility_table[2]["räucherschinken"] = 0.999
    fertility_table[1][None] = 0.99

    probabilities = {
        "p1": 0.167,
        "translation_table": translation_table,
        "head_distortion_table": head_distortion_table,
        "non_head_distortion_table": non_head_distortion_table,
        "fertility_table": fertility_table,
        "alignment_table": None,
    }
    model4 = IBMModel4(corpus, 0, src_classes, trg_classes, probabilities)

    # act
    probability = model4.prob_t_a_given_s(alignment_info)

    # assert
    null_generation = 5 * pow(0.167, 1) * pow(0.833, 4)
    fertility = 1 * 0.99 * 1 * 0.99 * 1 * 0.99 * 1 * 0.99 * 2 * 0.999
    lexical_translation = 0.98 * 0.98 * 0.98 * 0.98 * 0.98 * 0.98
    distortion = 0.97 * 0.97 * 1 * 0.97 * 0.97 * 0.96
    expected_probability = (
        null_generation * fertility * lexical_translation * distortion
    )
    self.assertEqual(round(probability, 4), round(expected_probability, 4))
def test_prob_t_a_given_s(self):
    # arrange
    src_sentence = ["ich", "esse", "ja", "gern", "räucherschinken"]
    trg_sentence = ["i", "love", "to", "eat", "smoked", "ham"]
    src_classes = {"räucherschinken": 0, "ja": 1, "ich": 2, "esse": 3, "gern": 4}
    trg_classes = {"ham": 0, "smoked": 1, "i": 3, "love": 4, "to": 2, "eat": 4}
    parallel_corpus = [AlignedSent(trg_sentence, src_sentence)]
    alignment_info = AlignmentInfo(
        (0, 1, 4, 0, 2, 5, 5),
        [None] + src_sentence,
        ["UNUSED"] + trg_sentence,
        [[3], [1], [4], [], [2], [5, 6]],
    )

    head_table = defaultdict(
        lambda: defaultdict(lambda: defaultdict(float))
    )
    head_table[1][None][3] = 0.97  # None, i
    head_table[3][2][4] = 0.97  # ich, eat
    head_table[-2][3][4] = 0.97  # esse, love
    head_table[3][4][1] = 0.97  # gern, smoked

    non_head_table = defaultdict(lambda: defaultdict(float))
    non_head_table[1][0] = 0.96  # ham

    lexical_table = defaultdict(lambda: defaultdict(float))
    lexical_table["i"]["ich"] = 0.98
    lexical_table["love"]["gern"] = 0.98
    lexical_table["to"][None] = 0.98
    lexical_table["eat"]["esse"] = 0.98
    lexical_table["smoked"]["räucherschinken"] = 0.98
    lexical_table["ham"]["räucherschinken"] = 0.98

    fert_table = defaultdict(lambda: defaultdict(float))
    fert_table[1]["ich"] = 0.99
    fert_table[1]["esse"] = 0.99
    fert_table[0]["ja"] = 0.99
    fert_table[1]["gern"] = 0.99
    fert_table[2]["räucherschinken"] = 0.999
    fert_table[1][None] = 0.99

    probabilities = {
        "p1": 0.167,
        "translation_table": lexical_table,
        "head_distortion_table": head_table,
        "non_head_distortion_table": non_head_table,
        "fertility_table": fert_table,
        "alignment_table": None,
    }
    model4 = IBMModel4(
        parallel_corpus, 0, src_classes, trg_classes, probabilities
    )

    # act
    probability = model4.prob_t_a_given_s(alignment_info)

    # assert
    # Expected value assembled from the hand-set table entries above.
    null_generation = 5 * pow(0.167, 1) * pow(0.833, 4)
    fertility = 1 * 0.99 * 1 * 0.99 * 1 * 0.99 * 1 * 0.99 * 2 * 0.999
    lexical_translation = 0.98 * 0.98 * 0.98 * 0.98 * 0.98 * 0.98
    distortion = 0.97 * 0.97 * 1 * 0.97 * 0.97 * 0.96
    expected_probability = (
        null_generation * fertility * lexical_translation * distortion
    )
    self.assertEqual(round(probability, 4), round(expected_probability, 4))
def generateModels(qtype):
    """
    Train IBM Models 1-5 on pairs of similar linkSO questions and pickle
    each trained model to disk.

    For every question pair labelled as similar, three parallel corpora
    are built: header-to-header, description-to-description, and
    header+description-to-header+description.  Each IBM model class is
    trained on all three corpora (50 EM iterations each), saved as
    ``model<TAG>_<ModelName>_<qtype>.pk``, and a sample translation
    probability (``'web'`` -> ``'html'``) is printed as a sanity check.

    :param qtype: linkSO question category used to locate the data files
        (e.g. the ``<qtype>_qid2all.txt`` and ``<qtype>_cosidf.txt`` files)
    """
    datadir = "data/linkSO"
    all_questions = pd.read_csv(
        join(datadir, "linkso/topublish/" + qtype + "/" + qtype + "_qid2all.txt"),
        sep='\t',
        names=['qID', 'qHeader', 'qDescription', 'topVotedAnswer', 'type'])
    similar_docs_file = pd.read_csv(
        join(datadir, "linkso/topublish/" + qtype + "/" + qtype + "_cosidf.txt"),
        sep='\t',
        names=['qID1', 'qID2', 'score', 'label'],
        skiprows=1)

    # NOTE(review): `label`, `question_id1`, `question_id2`, `question_id`,
    # `question_header`, `question_description` are assumed to be
    # module-level column-name constants -- confirm they are defined.
    filtered_rows = similar_docs_file[similar_docs_file[label] == 1]
    filtered_columns = filtered_rows.filter(items=[question_id1, question_id2])

    # Build the three parallel corpora from the similar question pairs.
    bitext_qH_qH = []
    bitext_qD_qD = []
    bitext_qHqD_qHqD = []
    for each_row in filtered_columns.itertuples():
        q1ID = each_row[1]
        q2ID = each_row[2]
        q1_row = all_questions.loc[all_questions[question_id] == q1ID]
        q1header = str(q1_row[question_header].values[0]).split()
        q1desc = str(q1_row[question_description].values[0]).split()
        q2_row = all_questions.loc[all_questions[question_id] == q2ID]
        q2header = str(q2_row[question_header].values[0]).split()
        q2desc = str(q2_row[question_description].values[0]).split()
        bitext_qH_qH.append(AlignedSent(q1header, q2header))
        bitext_qD_qD.append(AlignedSent(q1desc, q2desc))
        bitext_qHqD_qHqD.append(
            AlignedSent(q1header + q1desc, q2header + q2desc))

    def train_and_save(model_cls, model_name, tag, bitext):
        # Train one IBM model on one bitext, pickle it, and return it.
        print("Training " + model_name + " " + tag + "..")
        start = time.time()
        model = model_cls(bitext, 50)
        print("Model " + tag + " trained.. In", time.time() - start,
              " seconds..")
        with open('model' + tag + '_' + model_name + '_' + qtype + '.pk',
                  'wb') as fout:
            pickle.dump(model, fout)
        return model

    # NOTE(review): NLTK's IBMModel4/IBMModel5 constructors also require
    # source/target word-class arguments -- confirm this two-argument call
    # matches the library version in use.
    for model_cls, model_name in [
        (IBMModel1, 'Model1'),
        (IBMModel2, 'Model2'),
        (IBMModel3, 'Model3'),
        (IBMModel4, 'Model4'),
        (IBMModel5, 'Model5'),
    ]:
        ibmQH = train_and_save(model_cls, model_name, 'QHQH', bitext_qH_qH)
        ibmQD = train_and_save(model_cls, model_name, 'QDQD', bitext_qD_qD)
        ibmQHQD = train_and_save(
            model_cls, model_name, 'QHQD', bitext_qHqD_qHqD)
        # Sanity check: probability of translating 'web' to 'html'.
        print(round(ibmQH.translation_table['html']['web'], 10))
        print(round(ibmQD.translation_table['html']['web'], 10))
        print(round(ibmQHQD.translation_table['html']['web'], 10))