def run(filename, iterations):
    # global variables utilized in the assessment of the IBM Model
    global ibm2
    global corpus
    # construct and modify corpus by adding the system alignments to every sentence pair
    corpus = compile_corpus(filename)
    ibm2 = IBMModel2(corpus, iterations)
    # produce the alignments of the test sentences
    get_alignments("data/evaluation tests/test sentences/test.spanish")
def IBM1_IBM2(filename):
    file = json.load(open(filename))
    # print("Corpus:", filename)

    # lists to store the sentences of each language
    fr_sentence = []
    en_sentence = []
    for i in range(len(file)):
        fr_sentence.append(file[i]['fr'])
        en_sentence.append(file[i]['en'])
    n = len(en_sentence)

    # split every sentence pair into words
    bitext = []
    for i in range(n):
        en_word = en_sentence[i].split(' ')
        fr_word = fr_sentence[i].split(' ')
        bitext.append(AlignedSent(fr_word, en_word))

    ibm1 = IBMModel1(bitext, 2000)
    for i in range(n):
        test_sentence = bitext[i]
        print(test_sentence.words)
        print(test_sentence.mots)
        print("Alignment according to IBM1 nltk model:")
        print(test_sentence.alignment)
        print('\n\n')

    ibm2 = IBMModel2(bitext, 2000)
    for i in range(n):
        test_sentence = bitext[i]
        print(test_sentence.words)
        print(test_sentence.mots)
        print("Alignment according to IBM2 nltk model:")
        print(test_sentence.alignment)
        print('\n\n')
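# Note on the snippet above: NLTK's IBM model constructors write the best
# alignment back onto each AlignedSent when training finishes, so training
# IBMModel2 on the same `bitext` overwrites the alignments that IBMModel1
# produced. A minimal sketch to keep both sets (assuming the bitext built in
# IBM1_IBM2()):
ibm1_alignments = [sentence_pair.alignment for sentence_pair in bitext]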
def compare_a_nltk_train(t_ibm1, max_steps, src, tar):
    print('Compare nltk to train() implementation:')

    # train() implementation
    max_le = max([len(e) for e in src])
    max_lf = max([len(f) for f in tar])
    en_word_dict, tk_word_dict = dicts_for_train_comparison(src, tar)
    num_of_e_word = len(en_word_dict)
    num_of_f_word = len(tk_word_dict)
    t_e2f_ibm1_matrix = np.full((num_of_e_word, num_of_f_word), 0, dtype=float)
    for (e_j, f_i), t_val in t_ibm1.items():
        t_e2f_ibm1_matrix[en_word_dict[e_j]][tk_word_dict[f_i]] = t_val
    t_e_f_mat, a_i_le_lf_mat = train(t_e2f_ibm1_matrix, en_word_dict, tk_word_dict,
                                     src, tar, max_le, max_lf, max_steps // 6)

    # nltk implementation
    aligned = test_sets_to_aligned(src, tar)
    ibm2 = IBMModel2(aligned, max_steps)
    a = ibm2.alignment_table
    t = ibm2.translation_table

    # compare the alignment tables
    correct0 = 0
    total = 0
    for i, rest1 in enumerate(a_i_le_lf_mat):
        for j, rest2 in enumerate(rest1):
            for l_f, rest3 in enumerate(rest2):
                for l_e, val in enumerate(rest3):
                    bool_ = (a[j][i][l_f + 1][l_e + 1] > 0.7) == (val > 0.7)
                    if not bool_:
                        if DEBUG:
                            print('wrong a:', a[j][i][l_f + 1][l_e + 1], '!=', val,
                                  'for i', i, ' j', j, ' l_e', l_e, ' l_f', l_f)
                    else:
                        correct0 += 1
                    total += 1
    print('a values ', 100 * (correct0 / total), '% correct,',
          total - correct0, 'values wrong.\n')

    # compare the translation tables
    en_word_dict, tk_word_dict = dicts_for_train_comparison(src, tar)
    correct0 = 0
    total = 0
    for sentence, srcs in enumerate(src):
        tars = tar[sentence]
        for eng_word in srcs:      # for all source words
            for tur_word in tars:  # for all target words
                idx_tur_in_dict = tk_word_dict[tur_word]
                idx_eng_in_dict = en_word_dict[eng_word]
                if idx_tur_in_dict < t_e_f_mat.shape[0] and idx_eng_in_dict < t_e_f_mat.shape[1]:
                    val = t_e_f_mat[idx_tur_in_dict][idx_eng_in_dict]
                    # the original indexed t with stale loop variables (f_i, e_j);
                    # the current word pair is what is actually being compared
                    bool_ = (t[tur_word][eng_word] > 0.7) == (val > 0.7)
                    if not bool_:
                        if DEBUG:
                            print('wrong t:', t[tur_word][eng_word], '!=', val,
                                  'for', eng_word, '->', tur_word)
                    else:
                        correct0 += 1
                    total += 1
    print('t values ', 100 * (correct0 / total), '% correct,',
          total - correct0, 'values wrong.\n')
def use_IBM2(corpus, settings):
    '''
    Runs IBM Model 2 on a corpus of aligned sentence objects.

    Inputs:
        corpus   = A list of AlignedSent objects, which in turn contain the
                   source language words, the target language words and a
                   possible alignment
        settings = The hardcoded options set within the "langsettings.json" file
    Outputs:
        ibm2     = An object containing the mapping between foreign and
                   translated words, with the probability of each mapping
        corpus   = The modified input, which now carries an alignment for
                   each sentence pair
    '''
    # train the model
    ibm2 = IBMModel2(corpus, settings['iterations'])
    return ibm2, corpus
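# A self-contained sketch of what use_IBM2() wraps, on a toy corpus (the toy
# names here are illustrative, not from the original project). NLTK's
# convention: AlignedSent(words, mots) trains translation from mots to words,
# and the resulting table is indexed as translation_table[word][mot].
from nltk.translate import AlignedSent, IBMModel2

toy_corpus = [
    AlignedSent(['the', 'house'], ['das', 'haus']),
    AlignedSent(['the', 'book'], ['das', 'buch']),
    AlignedSent(['a', 'book'], ['ein', 'buch']),
]
toy_ibm2 = IBMModel2(toy_corpus, 5)
print(toy_ibm2.translation_table['book']['buch'])  # t(book | buch)
print(toy_corpus[0].alignment)                     # alignment set during training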
def test_set_uniform_alignment_probabilities_of_non_domain_values(self):
    # arrange
    corpus = [
        AlignedSent(["ham", "eggs"], ["schinken", "schinken", "eier"]),
        AlignedSent(["spam", "spam", "spam", "spam"], ["spam", "spam"]),
    ]
    model2 = IBMModel2(corpus, 0)

    # act
    model2.set_uniform_probabilities(corpus)

    # assert
    # examine i and j values that are not in the training data domain
    self.assertEqual(model2.alignment_table[99][1][3][2], IBMModel.MIN_PROB)
    self.assertEqual(model2.alignment_table[2][99][2][4], IBMModel.MIN_PROB)
def test_set_uniform_alignment_probabilities(self):
    # arrange
    corpus = [
        AlignedSent(['ham', 'eggs'], ['schinken', 'schinken', 'eier']),
        AlignedSent(['spam', 'spam', 'spam', 'spam'], ['spam', 'spam']),
    ]
    model2 = IBMModel2(corpus, 0)

    # act
    model2.set_uniform_probabilities(corpus)

    # assert
    # expected_prob = 1.0 / (length of source sentence + 1)
    self.assertEqual(model2.alignment_table[0][1][3][2], 1.0 / 4)
    self.assertEqual(model2.alignment_table[2][4][2][4], 1.0 / 3)
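# Reading these two tests together: alignment_table[i][j][l][m] holds the
# probability that target position j aligns to source position i, given
# source length l and target length m; i = 0 is the NULL source position,
# which is why the uniform value is 1 / (l + 1). Index combinations never
# seen in training fall back to IBMModel.MIN_PROB, as the previous test
# asserts.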
def task_2(path, alignments_pred):
    """
    Task 2: Compare our alignment results with the NLTK library's output for
    IBM Model 1 and IBM Model 2.

    :param path: path to the data
    :param alignments_pred: alignments computed in task 1
    :return: parallel_corpus, phrase_extraction_corpus_en, phrase_extraction_corpus_fr
    """
    parallel_corpus = []
    phrase_extraction_corpus_en = []
    phrase_extraction_corpus_fr = []
    with open(path, 'r') as f:
        d = json.load(f)
    for sent in d:
        phrase_extraction_corpus_en.append(sent['en'])
        phrase_extraction_corpus_fr.append(sent['fr'])
        fr_words = sent['fr'].split()
        en_words = sent['en'].split()
        parallel_corpus.append(AlignedSent(en_words, fr_words))

    # MODEL 2
    print("******IBM Model-2*******")
    ibm2 = IBMModel2(parallel_corpus, 50)
    for test in parallel_corpus:
        print("en_sentence: {}".format(test.words))
        print("fr_sentence: {}".format(test.mots))
        try:
            print("nltk alignment: {}".format(test.alignment))
        except Exception:
            print("nltk ibm model 2 alignment failed")

    # MODEL 1
    ibm1 = IBMModel1(parallel_corpus, 50)
    print("******IBM Model 1*******")
    for test in parallel_corpus:
        print("en_sentence: {}".format(test.words))
        print("fr_sentence: {}".format(test.mots))
        try:
            print("nltk alignment: {}".format(test.alignment))
        except Exception:
            print("nltk ibm model 1 alignment failed")
        str_test = ' '.join(word for word in test.words)
        print("predicted alignment: {}\n".format(alignments_pred[str_test]))

    return parallel_corpus, phrase_extraction_corpus_en, phrase_extraction_corpus_fr
def __init__(self, sentence_aligned_corpus, iterations, probability_tables=None):
    """
    Train on ``sentence_aligned_corpus`` and create a lexical
    translation model, a distortion model, a fertility model, and a
    model for generating NULL-aligned words.

    Translation direction is from ``AlignedSent.mots`` to
    ``AlignedSent.words``.

    :param sentence_aligned_corpus: Sentence-aligned parallel corpus
    :type sentence_aligned_corpus: list(AlignedSent)

    :param iterations: Number of iterations to run training algorithm
    :type iterations: int

    :param probability_tables: Optional. Use this to pass in custom
        probability values. If not specified, probabilities will be
        set to a uniform distribution, or some other sensible value.
        If specified, all the following entries must be present:
        ``translation_table``, ``alignment_table``,
        ``fertility_table``, ``p1``, ``distortion_table``.
        See ``IBMModel`` for the type and purpose of these tables.
    :type probability_tables: dict[str]: object
    """
    super(IBMModel3, self).__init__(sentence_aligned_corpus)
    self.reset_probabilities()

    if probability_tables is None:
        # Get translation and alignment probabilities from IBM Model 2
        ibm2 = IBMModel2(sentence_aligned_corpus, iterations)
        self.translation_table = ibm2.translation_table
        self.alignment_table = ibm2.alignment_table
        self.set_uniform_probabilities(sentence_aligned_corpus)
    else:
        # Set user-defined probabilities
        self.translation_table = probability_tables["translation_table"]
        self.alignment_table = probability_tables["alignment_table"]
        self.fertility_table = probability_tables["fertility_table"]
        self.p1 = probability_tables["p1"]
        self.distortion_table = probability_tables["distortion_table"]

    for n in range(0, iterations):
        self.train(sentence_aligned_corpus)
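# Typical default use of the constructor above: with probability_tables=None,
# an IBM Model 2 is trained first and its tables seed Model 3's EM training.
# A minimal sketch on a toy corpus (toy names are illustrative only):
from nltk.translate import AlignedSent, IBMModel3

toy_bitext = [
    AlignedSent(['the', 'house'], ['das', 'haus']),
    AlignedSent(['the', 'book'], ['das', 'buch']),
]
toy_ibm3 = IBMModel3(toy_bitext, 5)  # 5 EM iterations, seeded by IBM Model 2
print(toy_ibm3.translation_table['book']['buch'])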
def compare_ibm_2_nltk(t, max_steps, a, src, tar):
    print('Compare my IBM Model 2 to nltk library:')
    aligned = test_sets_to_aligned(src, tar)
    ibm2 = IBMModel2(aligned, max_steps)
    compare_t_table(ibm2, t)

    correct = True
    correct_a = 0
    for (i, j, l_e, l_f) in a:
        bool_ = (a[(i, j, l_e, l_f)] > 0.7) == (ibm2.alignment_table[j][i][l_f][l_e] > 0.7)
        if not bool_:
            if DEBUG:
                print('wrong a:', a[(i, j, l_e, l_f)], '!=', ibm2.alignment_table[j][i][l_f][l_e],
                      'for i', i, ' j', j, ' l_e', l_e, ' l_f', l_f)
            correct = False
        else:
            correct_a += 1
            # print(' a:', a[(i, j, l_e, l_f)], 'for i', i, ' j', j, ' l_e', l_e, ' l_f', l_f)
    if correct:
        print('All a values were correct.\n')
    else:
        print('a values ', 100 * (correct_a / len(a)), '% correct,',
              len(a) - correct_a, 'values wrong.\n')
def test_prob_t_a_given_s(self):
    # arrange
    src_sentence = ["ich", "esse", "ja", "gern", "räucherschinken"]
    trg_sentence = ["i", "love", "to", "eat", "smoked", "ham"]
    corpus = [AlignedSent(trg_sentence, src_sentence)]
    alignment_info = AlignmentInfo(
        (0, 1, 4, 0, 2, 5, 5),
        [None] + src_sentence,
        ["UNUSED"] + trg_sentence,
        None,
    )

    translation_table = defaultdict(lambda: defaultdict(float))
    translation_table["i"]["ich"] = 0.98
    translation_table["love"]["gern"] = 0.98
    translation_table["to"][None] = 0.98
    translation_table["eat"]["esse"] = 0.98
    translation_table["smoked"]["räucherschinken"] = 0.98
    translation_table["ham"]["räucherschinken"] = 0.98

    alignment_table = defaultdict(
        lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(float)))
    )
    alignment_table[0][3][5][6] = 0.97  # None -> to
    alignment_table[1][1][5][6] = 0.97  # ich -> i
    alignment_table[2][4][5][6] = 0.97  # esse -> eat
    alignment_table[4][2][5][6] = 0.97  # gern -> love
    alignment_table[5][5][5][6] = 0.96  # räucherschinken -> smoked
    alignment_table[5][6][5][6] = 0.96  # räucherschinken -> ham

    model2 = IBMModel2(corpus, 0)
    model2.translation_table = translation_table
    model2.alignment_table = alignment_table

    # act
    probability = model2.prob_t_a_given_s(alignment_info)

    # assert
    lexical_translation = 0.98 * 0.98 * 0.98 * 0.98 * 0.98 * 0.98
    alignment = 0.97 * 0.97 * 0.97 * 0.97 * 0.96 * 0.96
    expected_probability = lexical_translation * alignment
    self.assertEqual(round(probability, 4), round(expected_probability, 4))
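# Reading the alignment tuple (0, 1, 4, 0, 2, 5, 5) in the test above: entry j
# gives the source position aligned to target word j, and index 0 is an unused
# placeholder. So 'i' -> ich (1), 'love' -> gern (4), 'to' -> NULL (0),
# 'eat' -> esse (2), 'smoked' -> räucherschinken (5), 'ham' -> räucherschinken (5),
# matching the translation and alignment tables the test sets up.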
def IBM_Model_2(corpus):
    bitext = []
    for x in corpus:
        bitext.append(AlignedSent(x[SOURCE_LANGUAGE].split(), x[DESTINATION_LANGUAGE].split()))

    print("IBM MODEL 2 :")
    print("")
    ibm2 = IBMModel2(bitext, NUMBER_OF_ITERATIONS)
    # pretty(ibm2.translation_table)
    for test in bitext:
        print("Source sentence:")
        print(test.words)
        print("Destination sentence:")
        print(test.mots)
        print("Alignment:")
        print(test.alignment)
        print("")
        print("----------------------------------------")
    return ibm2.translation_table, bitext
def __init__(self, sentence_aligned_corpus, iterations, probability_tables=None):
    super(IBMModel3, self).__init__(sentence_aligned_corpus)
    self.reset_probabilities()

    if probability_tables is None:
        ibm2 = IBMModel2(sentence_aligned_corpus, iterations)
        self.translation_table = ibm2.translation_table
        self.alignment_table = ibm2.alignment_table
        self.set_uniform_probabilities(sentence_aligned_corpus)
    else:
        self.translation_table = probability_tables['translation_table']
        self.alignment_table = probability_tables['alignment_table']
        self.fertility_table = probability_tables['fertility_table']
        self.p1 = probability_tables['p1']
        self.distortion_table = probability_tables['distortion_table']

    for n in range(0, iterations):
        self.train(sentence_aligned_corpus)
list_eng = []  # note: added here; the original excerpt used list_eng without showing its initialisation
list_fore = []
for i in range(len(x)):
    list_eng.append(x[i].split())
    list_fore.append(y[i].split())

data = []
bitext = []
for i in range(len(list_eng)):
    l = []
    l.append(list_eng[i])
    l.append(list_fore[i])
    data.append(l)
    bitext.append(AlignedSent(list_eng[i], list_fore[i]))

ibm1 = IBMModel1(bitext, 5)
ibm2 = IBMModel2(bitext, 5)

# collect the distinct words of the corpus
corpus = []
for i in data:
    for j in i:
        for k in j:
            if k not in corpus:
                corpus.append(k)

print(bitext)
print("first word " + "second word " + " ibmmodel1 " + " ibmmodel2 ")
for i in corpus:
    for j in corpus:
        if (i != j) and (ibm1.translation_table[i][j] > 0.000005
                         or ibm2.translation_table[i][j] > 0.000005):
            print(i + " " + j + " " + str(ibm1.translation_table[i][j])
                  + " " + str(ibm2.translation_table[i][j]))
# with open('ibm_part1.pkl', 'rb') as file:
#     ibm_part1 = pickle.load(file)
# print('Loading ibm model 2')
# with open('ibm_part2.pkl', 'rb') as file:
#     ibm_part2 = pickle.load(file)
# print('Loading ibm model 3')
# with open('ibm_part3.pkl', 'rb') as file:
#     ibm_part3 = pickle.load(file)
# # with open('ibm_part4.pkl', 'rb') as file:
# #     ibm_part4 = pickle.load(file)
# # with open('ibm_part5.pkl', 'rb') as file:
# #     ibm_part5 = pickle.load(file)
# else:

# bitext = bitext_part1 + bitext_part2 + bitext_part3  # + bitext_part4 + bitext_part5
print('Creating ibm part 1')
ibm_part1 = IBMModel2(bitext_part1, 5)
# with open('ibm_part1.pkl', 'wb') as file:
#     pickle.dump(ibm_part1, file)
#
# print('Creating ibm part 2')
# ibm_part2 = IBMModel2(bitext_part2, 5)
# with open('ibm_part2.pkl', 'wb') as file:
#     pickle.dump(ibm_part2, file)
#
# print('Creating ibm part 3')
# ibm_part3 = IBMModel2(bitext_part3, 5)
# with open('ibm_part3.pkl', 'wb') as file:
#     pickle.dump(ibm_part3, file)
#
# print('Creating ibm part 4')
# ibm_part4 = IBMModel2(bitext_part4, 5)
while i < len(lines):
    if i % 2 == 0:
        # We will use the IBM model to calculate p(f|e).
        # With the noisy-channel model, the translation direction is reversed!
        bitext.append(AlignedSent(
            [t.lower() for t in nltk.word_tokenize(lines[i + 1])],
            [t.lower() for t in nltk.word_tokenize(lines[i])]))
        fr_text += nltk.word_tokenize(lines[i])
        en_text += nltk.word_tokenize(lines[i + 1])
    i += 1  # incremented on every pass so the loop also terminates for odd i
train_file.close()

fr_text = nltk.Text(fr_text)
en_text = nltk.Text(en_text)
en_vocab = [v for v in en_text.vocab()]

ibm2 = IBMModel2(bitext, len(bitext) * 5)

# conditional frequency distribution of sentence-length pairs (l, m)
cfd_len_l_m = {}
len_l_m_list = []
for t in bitext:
    l = len(t.mots)
    m = len(t.words)
    len_l_m_list += [(l, m)]
cfd_len_l_m = nltk.ConditionalFreqDist(len_l_m_list)

# cfd_dict = {}
# # Get the cfd for the alignment
# for t in bitext:
#     l = len(t.mots)
#     m = len(t.words)
#     cfd_dict[(l, m)] = nltk.ConditionalFreqDist(t.alignment)
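# A sketch of how the length distribution built above could serve as a crude
# noisy-channel length model (assuming cfd_len_l_m from this snippet):
# cfd_len_l_m[l] is a FreqDist over the target lengths m observed with source
# length l, so .freq(m) estimates P(m | l) by relative frequency.
p_m_given_l = cfd_len_l_m[10].freq(12)  # estimated P(target length 12 | source length 10)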
wc = FreqDist()
# Structures: 1 - paragraph alignment only, 2 - sentence alignment based on paragraphs, 3 - direct sentence alignment
structures = {1: "para", 2: "psent", 3: "sent"}
struct_num = 1
for i in range(1, 44):
    en_path = 'translation-dashboard/data/en-ba-' + structures[struct_num] + '-align/en-chapter-' + str(i) + '.txt'
    ba_path = 'translation-dashboard/data/en-ba-' + structures[struct_num] + '-align/ba-chapter-' + str(i) + '.txt'
    aligned_paras.extend(para_as_sent(en_path, ba_path))
    wc += word_count(en_path)
# print(wc.freq("i"))

num_iterations = 20
start = timer()
model = IBMModel2(aligned_paras, num_iterations)
end = timer()
time_elapsed = end - start  # the timer only measures the time taken to train the IBM model

with open('align_models/ibm-model-runtimes.csv', 'a', encoding='utf-8') as output_file:
    output_writer = csv.writer(output_file, delimiter='\t')
    output_writer.writerow(["2", str(num_iterations), time_elapsed, socket.gethostname(), 'struct' + str(struct_num)])

# Save model and word count
with open('align_models/ibm2.model', 'wb') as m_file:
    dill.dump(model, m_file)
with open('align_models/en.wc', 'wb') as wc_file:
    pickle.dump(wc, wc_file)

write_common_words_translations(model, wc, 50, 'align_models/word-align.csv')
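# Loading the persisted model back is symmetric. dill is used for the model
# because NLTK's IBM tables are nested defaultdicts built from lambdas, which
# the standard pickle module cannot serialize:
import dill

with open('align_models/ibm2.model', 'rb') as m_file:
    reloaded_model = dill.load(m_file)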
def generateModels(qtype):
    # dictionary, pwC, pdf = prepare_corpus("data/linkSO", recompute=False)
    datadir = "data/linkSO"
    all_questions = pd.read_csv(
        join(datadir, "linkso/topublish/" + qtype + "/" + qtype + "_qid2all.txt"),
        sep='\t', names=['qID', 'qHeader', 'qDescription', 'topVotedAnswer', 'type'])
    similar_docs_file = pd.read_csv(
        join(datadir, "linkso/topublish/" + qtype + "/" + qtype + "_cosidf.txt"),
        sep='\t', names=['qID1', 'qID2', 'score', 'label'], skiprows=1)
    filtered_rows = similar_docs_file[similar_docs_file[label] == 1]
    filtered_columns = filtered_rows.filter(items=[question_id1, question_id2])

    bitext_qH_qH = []
    bitext_qD_qD = []
    bitext_qHqD_qHqD = []
    loop_counter = 0
    for each_row in filtered_columns.itertuples():
        q1ID = each_row[1]
        q2ID = each_row[2]
        q1_row = all_questions.loc[all_questions[question_id] == q1ID]
        q1header = str(q1_row[question_header].values[0]).split()
        q1desc = str(q1_row[question_description].values[0]).split()
        q1ans = str(q1_row[top_answer].values[0]).split()
        q2_row = all_questions.loc[all_questions[question_id] == q2ID]
        q2header = str(q2_row[question_header].values[0]).split()
        q2desc = str(q2_row[question_description].values[0]).split()
        q2ans = str(q2_row[top_answer].values[0]).split()
        # print("\nQ1 Header:", q1header)
        # print("Q1 Desc:", q1desc)
        # print("Q1 Answer:", q1ans)
        # print("Q2:", q2header)
        # print("Q2 Desc:", q2desc)
        # print("Q2 Answer:", q2ans)
        bitext_qH_qH.append(AlignedSent(q1header, q2header))
        bitext_qD_qD.append(AlignedSent(q1desc, q2desc))
        bitext_qHqD_qHqD.append(AlignedSent(q1header + q1desc, q2header + q2desc))
        loop_counter += 1

    # Model 1
    print("Training Model1 QH QH..")
    start = time.time()
    ibmQH = IBMModel1(bitext_qH_qH, 50)
    print("Model QH QH trained.. In", time.time() - start, " seconds..")
    with open('modelQHQH_Model1_' + qtype + '.pk', 'wb') as fout:
        pickle.dump(ibmQH, fout)
    print("Training Model1 QD QD..")
    start = time.time()
    ibmQD = IBMModel1(bitext_qD_qD, 50)
    print("Model QD QD trained.. In", time.time() - start, " seconds..")
    with open('modelQDQD_Model1_' + qtype + '.pk', 'wb') as fout:
        pickle.dump(ibmQD, fout)
    print("Training Model1 QHQD QHQD..")
    start = time.time()
    ibmQHQD = IBMModel1(bitext_qHqD_qHqD, 50)
    print("Model QHQD QHQD trained.. In", time.time() - start, " seconds..")
    with open('modelQHQD_Model1_' + qtype + '.pk', 'wb') as fout:
        pickle.dump(ibmQHQD, fout)
    print(round(ibmQH.translation_table['html']['web'], 10))
    print(round(ibmQD.translation_table['html']['web'], 10))
    print(round(ibmQHQD.translation_table['html']['web'], 10))

    # Model 2
    print("Training Model2 QH QH..")
    start = time.time()
    ibmQH = IBMModel2(bitext_qH_qH, 50)
    print("Model QH QH trained.. In", time.time() - start, " seconds..")
    with open('modelQHQH_Model2_' + qtype + '.pk', 'wb') as fout:
        pickle.dump(ibmQH, fout)
    print("Training Model2 QD QD..")
    start = time.time()
    ibmQD = IBMModel2(bitext_qD_qD, 50)
    print("Model QD QD trained.. In", time.time() - start, " seconds..")
    with open('modelQDQD_Model2_' + qtype + '.pk', 'wb') as fout:
        pickle.dump(ibmQD, fout)
    print("Training Model2 QHQD QHQD..")
    start = time.time()
    ibmQHQD = IBMModel2(bitext_qHqD_qHqD, 50)
    print("Model QHQD QHQD trained.. In", time.time() - start, " seconds..")
    with open('modelQHQD_Model2_' + qtype + '.pk', 'wb') as fout:
        pickle.dump(ibmQHQD, fout)
    print(round(ibmQH.translation_table['html']['web'], 10))
    print(round(ibmQD.translation_table['html']['web'], 10))
    print(round(ibmQHQD.translation_table['html']['web'], 10))

    # Model 3
    print("Training Model3 QH QH..")
    start = time.time()
    ibmQH = IBMModel3(bitext_qH_qH, 50)
    print("Model QH QH trained.. In", time.time() - start, " seconds..")
    with open('modelQHQH_Model3_' + qtype + '.pk', 'wb') as fout:
        pickle.dump(ibmQH, fout)
    print("Training Model3 QD QD..")
    start = time.time()
    ibmQD = IBMModel3(bitext_qD_qD, 50)
    print("Model QD QD trained.. In", time.time() - start, " seconds..")
    with open('modelQDQD_Model3_' + qtype + '.pk', 'wb') as fout:
        pickle.dump(ibmQD, fout)
    print("Training Model3 QHQD QHQD..")
    start = time.time()
    ibmQHQD = IBMModel3(bitext_qHqD_qHqD, 50)
    print("Model QHQD QHQD trained.. In", time.time() - start, " seconds..")
    with open('modelQHQD_Model3_' + qtype + '.pk', 'wb') as fout:
        pickle.dump(ibmQHQD, fout)
    print(round(ibmQH.translation_table['html']['web'], 10))
    print(round(ibmQD.translation_table['html']['web'], 10))
    print(round(ibmQHQD.translation_table['html']['web'], 10))

    # Model 4
    print("Training Model4 QH QH..")
    start = time.time()
    ibmQH = IBMModel4(bitext_qH_qH, 50)
    print("Model QH QH trained.. In", time.time() - start, " seconds..")
    with open('modelQHQH_Model4_' + qtype + '.pk', 'wb') as fout:
        pickle.dump(ibmQH, fout)
    print("Training Model4 QD QD..")
    start = time.time()
    ibmQD = IBMModel4(bitext_qD_qD, 50)
    print("Model QD QD trained.. In", time.time() - start, " seconds..")
    with open('modelQDQD_Model4_' + qtype + '.pk', 'wb') as fout:
        pickle.dump(ibmQD, fout)
    print("Training Model4 QHQD QHQD..")
    start = time.time()
    ibmQHQD = IBMModel4(bitext_qHqD_qHqD, 50)
    print("Model QHQD QHQD trained.. In", time.time() - start, " seconds..")
    with open('modelQHQD_Model4_' + qtype + '.pk', 'wb') as fout:
        pickle.dump(ibmQHQD, fout)
    print(round(ibmQH.translation_table['html']['web'], 10))
    print(round(ibmQD.translation_table['html']['web'], 10))
    print(round(ibmQHQD.translation_table['html']['web'], 10))

    # Model 5
    print("Training Model5 QH QH..")
    start = time.time()
    ibmQH = IBMModel5(bitext_qH_qH, 50)
    print("Model QH QH trained.. In", time.time() - start, " seconds..")
    with open('modelQHQH_Model5_' + qtype + '.pk', 'wb') as fout:
        pickle.dump(ibmQH, fout)
    print("Training Model5 QD QD..")
    start = time.time()
    ibmQD = IBMModel5(bitext_qD_qD, 50)
    print("Model QD QD trained.. In", time.time() - start, " seconds..")
    with open('modelQDQD_Model5_' + qtype + '.pk', 'wb') as fout:
        pickle.dump(ibmQD, fout)
    print("Training Model5 QHQD QHQD..")
    start = time.time()
    ibmQHQD = IBMModel5(bitext_qHqD_qHqD, 50)
    print("Model QHQD QHQD trained.. In", time.time() - start, " seconds..")
    with open('modelQHQD_Model5_' + qtype + '.pk', 'wb') as fout:
        pickle.dump(ibmQHQD, fout)
    print(round(ibmQH.translation_table['html']['web'], 10))
    print(round(ibmQD.translation_table['html']['web'], 10))
    print(round(ibmQHQD.translation_table['html']['web'], 10))
# print(test_sentence)
print(align_ibm)
# print(" ")
print(" ")


# In[268]:

# TRAINING IBM MODEL 2
# bitext_2 will hold the parallel corpus
bitext_2 = []
for i in range(len(fr)):
    bitext_2.append(AlignedSent(en[i], fr[i]))

# Training for 1000 iterations
ibm2 = IBMModel2(bitext_2, 1000)

# trans_dict_2 will contain the translation probability for each distinct pair of words,
# a pair being of the form (english_word, french_word)
trans_dict_2 = ibm2.translation_table


# In[269]:

# ALIGNMENTS OF IBM MODEL 2
print("IBM MODEL 2")
for i in range(len(fr)):
    test_sentence = bitext_2[i]
    align_ibm2 = test_sentence.alignment
    # print(test_sentence)
    print(align_ibm2)
    # print(" ")
    print(" ")
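# A sketch for querying trans_dict_2 built above, assuming the
# (english_word, french_word) orientation noted in the comments;
# best_translation() is a hypothetical helper, not part of the original code.
def best_translation(fr_word, table):
    # gather t(en | fr) for every English word with an entry for fr_word
    candidates = {en: probs[fr_word] for en, probs in table.items() if fr_word in probs}
    return max(candidates, key=candidates.get) if candidates else None

# e.g. best_translation('maison', trans_dict_2)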