Code example #1
    def test_set_uniform_translation_probabilities_of_non_domain_values(self):
        # arrange
        corpus = [
            AlignedSent(["ham", "eggs"], ["schinken", "schinken", "eier"]),
            AlignedSent(["spam", "spam", "spam", "spam"], ["spam", "spam"]),
        ]
        model1 = IBMModel1(corpus, 0)

        # act
        model1.set_uniform_probabilities(corpus)

        # assert
        # examine target words that are not in the training data domain
        self.assertEqual(model1.translation_table["parrot"]["eier"],
                         IBMModel.MIN_PROB)
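The lookup for 'parrot' succeeds even though the word never appears in the corpus because translation_table is a nested defaultdict whose leaf default is the model's probability floor. A minimal standalone sketch of that shape (the 1.0e-12 floor matches NLTK's IBMModel.MIN_PROB; the trained entry is illustrative):

from collections import defaultdict

MIN_PROB = 1.0e-12  # probability floor, as in IBMModel.MIN_PROB
table = defaultdict(lambda: defaultdict(lambda: MIN_PROB))
table['ham']['schinken'] = 0.5  # illustrative trained entry
print(table['parrot']['eier'])  # 1e-12: unseen pairs fall back to the floor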
Code example #2
    def test_set_uniform_translation_probabilities(self):
        # arrange
        corpus = [
            AlignedSent(['ham', 'eggs'], ['schinken', 'schinken', 'eier']),
            AlignedSent(['spam', 'spam', 'spam', 'spam'], ['spam', 'spam']),
        ]
        model1 = IBMModel1(corpus, 0)

        # act
        model1.set_uniform_probabilities(corpus)

        # assert
        # expected_prob = 1.0 / target vocab size  (3 target words -> 1/3)
        self.assertEqual(model1.translation_table['ham']['eier'], 1.0 / 3)
        self.assertEqual(model1.translation_table['eggs'][None], 1.0 / 3)
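The expected value follows from the size of the target-side vocabulary. A quick standalone check under that reading (only the snippet's own corpus is assumed):

from nltk.translate import AlignedSent

corpus = [
    AlignedSent(['ham', 'eggs'], ['schinken', 'schinken', 'eier']),
    AlignedSent(['spam', 'spam', 'spam', 'spam'], ['spam', 'spam']),
]
# the target side is AlignedSent.words: {'ham', 'eggs', 'spam'}
trg_vocab = {word for sent in corpus for word in sent.words}
print(1.0 / len(trg_vocab))  # 0.3333..., matching both assertions above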
Code example #3
File: tasks.py  Project: gaurabdg/IBM-Model-1
def task_2(path, alignments_pred):
    """
    Task 2: Compare our alignments from Task 1 with the IBM Model 1 and IBM Model 2 alignments produced by the NLTK library
    :param path: path for data
    :param alignments_pred: alignments computed in task 1
    :return: parallel_corpus, phrase_extraction_corpus_en, phrase_extraction_corpus_fr
    """
    parallel_corpus = []
    phrase_extraction_corpus_en = []
    phrase_extraction_corpus_fr = []
    with open(path, 'r') as f:
        d = json.load(f)

    for sent in d:
        phrase_extraction_corpus_en.append(sent['en'])
        phrase_extraction_corpus_fr.append(sent['fr'])
        fr_words = sent['fr'].split()
        en_words = sent['en'].split()
        parallel_corpus.append(AlignedSent(en_words, fr_words))

    # MODEL - 2

    print("******IBM Model-2*******")

    ibm2 = IBMModel2(parallel_corpus, 50)
    for test in parallel_corpus:
        print("en_sentence: {}".format(test.words))
        print("fr_sentence: {}".format(test.mots))
        try:
            print("nltk alignment: {}".format(test.alignment))
        except:
            print("nltk ibm model 2 alignment failed")

    #  MODEL-1

    ibm1 = IBMModel1(parallel_corpus, 50)
    print("******IBM Model 1*******")
    for test in parallel_corpus:
        print("en_sentence: {}".format(test.words))
        print("fr_sentence: {}".format(test.mots))
        try:
            print("nltk alignment: {}".format(test.alignment))
        except:
            print("nltk ibm model 1 alignment failed")
        str_test = ' '.join(word for word in test.words)
        print("predicted alignment: {}\n".format(alignemnts_pred[str_test]))

    return parallel_corpus, phrase_extraction_corpus_en, phrase_extraction_corpus_fr
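A hypothetical invocation of task_2, assuming a data.json file of [{'en': ..., 'fr': ...}, ...] records; the empty defaultdict stands in for the Task 1 alignments, which are not shown here:

from collections import defaultdict

corpus, en_sents, fr_sents = task_2('data.json', defaultdict(str))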
Code example #4
    def __init__(self, sentence_aligned_corpus, output_file, iterations):
        """
        Train on ``sentence_aligned_corpus`` and create a lexical
        translation model and an alignment model.

        Translation direction is from ``AlignedSent.mots`` to
        ``AlignedSent.words``.

        Runs a few iterations of Model 1 training to initialize
        model parameters.

        :param sentence_aligned_corpus: Sentence-aligned parallel corpus
        :type sentence_aligned_corpus: list(AlignedSent)

        :param iterations: Number of iterations to run training algorithm
        :type iterations: int
        """
        print "initializing model2..."
        super(IBMModel2, self).__init__(sentence_aligned_corpus)

        # Get initial translation probability distribution
        # from a few iterations of Model 1 training.
        print "start training model1..."
        ibm1 = IBMModel1(sentence_aligned_corpus, 10)
        print "finished training model1..."
        self.translation_table = ibm1.translation_table

        # Initialize the distribution of alignment probability,
        # a(i | j,l,m) = 1 / (l+1) for all i, j, l, m
        for aligned_sentence in sentence_aligned_corpus:
            l = len(aligned_sentence.mots)
            m = len(aligned_sentence.words)
            initial_value = 1.0 / (l + 1)
            if initial_value > IBMModel.MIN_PROB:
                for i in range(0, l + 1):
                    for j in range(1, m + 1):
                        self.alignment_table[i][j][l][m] = initial_value
            else:
                warnings.warn("Source sentence is too long (" + str(l) +
                              " words). Results may be less accurate.")

        self.train(sentence_aligned_corpus, iterations)
        self.output_file = output_file
        print "output_file: ", self.output_file
        self.__align_all(sentence_aligned_corpus)
Code example #5
File: ibm2.py  Project: tchangw/nltk
    def __init__(self,
                 sentence_aligned_corpus,
                 iterations,
                 probability_tables=None):
        """
        Train on ``sentence_aligned_corpus`` and create a lexical
        translation model and an alignment model.

        Translation direction is from ``AlignedSent.mots`` to
        ``AlignedSent.words``.

        :param sentence_aligned_corpus: Sentence-aligned parallel corpus
        :type sentence_aligned_corpus: list(AlignedSent)

        :param iterations: Number of iterations to run training algorithm
        :type iterations: int

        :param probability_tables: Optional. Use this to pass in custom
            probability values. If not specified, probabilities will be
            set to a uniform distribution, or some other sensible value.
            If specified, all the following entries must be present:
            ``translation_table``, ``alignment_table``.
            See ``IBMModel`` for the type and purpose of these tables.
        :type probability_tables: dict[str]: object
        """
        super().__init__(sentence_aligned_corpus)

        if probability_tables is None:
            # Get translation probabilities from IBM Model 1
            # Run more iterations of training for Model 1, since it is
            # faster than Model 2
            ibm1 = IBMModel1(sentence_aligned_corpus, 2 * iterations)
            self.translation_table = ibm1.translation_table
            self.set_uniform_probabilities(sentence_aligned_corpus)
        else:
            # Set user-defined probabilities
            self.translation_table = probability_tables["translation_table"]
            self.alignment_table = probability_tables["alignment_table"]

        for n in range(0, iterations):
            self.train(sentence_aligned_corpus)

        self.align_all(sentence_aligned_corpus)
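For reference, the probability_tables hook documented above can seed Model 2 with custom values instead of the Model 1 bootstrap. A minimal sketch, assuming a one-sentence toy corpus; the nested defaultdict shapes mirror translation_table (t[target][source]) and alignment_table (a[i][j][l][m]) from IBMModel, and the uniform seeds follow the 1/(l+1) initialization shown in the previous example:

from collections import defaultdict
from nltk.translate import AlignedSent, IBMModel2

corpus = [AlignedSent(['the', 'house'], ['das', 'haus'])]

# t(target | source): uniform over the 2-word target vocabulary
translation_table = defaultdict(lambda: defaultdict(lambda: 0.5))

# a(i | j, l, m): uniform over source positions 0..l; here l = 2, so 1/3
alignment_table = defaultdict(
    lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: 1.0 / 3)))
)

tables = {
    'translation_table': translation_table,
    'alignment_table': alignment_table,
}
ibm2 = IBMModel2(corpus, 5, probability_tables=tables)
print(ibm2.translation_table['the']['das'])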
Code example #6
def IBM_Model_1(corpus):
    bitext = []
    for x in corpus:
        bitext.append(
            AlignedSent(x[SOURCE_LANGUAGE].split(),
                        x[DESTINATION_LANGUAGE].split()))
    print("IBM MODEL 1 :")
    print("")
    #calling the inbuilt IBM Model 1 function
    ibm1 = IBMModel1(bitext, NUMBER_OF_ITERATIONS)
    for test in bitext:
        print("Source sentence:")
        print(test.words)
        print("Destination sentence:")
        print(test.mots)
        print("Alignment:")
        print(test.alignment)
        print("")
    print("----------------------------------------")
    return ibm1.translation_table, bitext
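A hypothetical call for the function above; SOURCE_LANGUAGE, DESTINATION_LANGUAGE, and NUMBER_OF_ITERATIONS are module-level constants that the snippet assumes, so illustrative values are supplied here:

SOURCE_LANGUAGE = 'en'
DESTINATION_LANGUAGE = 'fr'
NUMBER_OF_ITERATIONS = 5
corpus = [
    {'en': 'the house', 'fr': 'das haus'},
    {'en': 'the book', 'fr': 'das buch'},
]
translation_table, bitext = IBM_Model_1(corpus)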
Code example #7
File: tmp.py  Project: skyhigh97/IBM-Model
from nltk.translate import IBMModel1, AlignedSent

bitext = []
bitext.append(AlignedSent(['klein', 'ist', 'das', 'haus'], ['the', 'house', 'is', 'small']))
bitext.append(AlignedSent(['das', 'haus', 'ist', 'ja', 'groß'], ['the', 'house', 'is', 'big']))
bitext.append(AlignedSent(['das', 'buch', 'ist', 'ja', 'klein'], ['the', 'book', 'is', 'small']))
bitext.append(AlignedSent(['das', 'haus'], ['the', 'house']))
bitext.append(AlignedSent(['das', 'buch'], ['the', 'book']))
bitext.append(AlignedSent(['ein', 'buch'], ['a', 'book']))

ibm1 = IBMModel1(bitext, 5)
print(ibm1.translation_table['buch']['book'])
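Since translation runs from AlignedSent.mots to AlignedSent.words, translation_table['buch']['book'] is t(buch | book), i.e. the probability of the German word given the English one. A follow-up sketch (continuing from the ibm1 object above) that ranks all German candidates for a source word:

# rank German candidates for the English source word 'book'
candidates = {
    de: probs['book']
    for de, probs in ibm1.translation_table.items()
    if 'book' in probs
}
for de, p in sorted(candidates.items(), key=lambda kv: -kv[1]):
    print(de, round(p, 3))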
Code example #8
def compare_ibm_1_nltk(t, max_steps, src, tar):
	print('Compare my IBM Model 1 to nltk library:')
	aligned = test_sets_to_aligned(src, tar)
	ibm1 = IBMModel1(aligned, max_steps)
	compare_t_table(ibm1, t)
Code example #9
    with open('ibm_part1.pkl', 'rb') as file:
        ibm_part1 = pickle.load(file)
    print('Loading ibm model 2')
    with open('ibm_part2.pkl', 'rb') as file:
        ibm_part2 = pickle.load(file)
    print('Loading ibm model 3')
    with open('ibm_part3.pkl', 'rb') as file:
        ibm_part3 = pickle.load(file)
    # with open('ibm_part4.pkl', 'rb') as file:
    #     ibm_part4 = pickle.load(file)
    # with open('ibm_part5.pkl', 'rb') as file:
    #     ibm_part5 = pickle.load(file)
else:
    #bitext = bitext_part1+bitext_part2+bitext_part3#+bitext_part4+bitext_part5
    print('Creating ibm part 1')
    ibm_part1 = IBMModel1(bitext_part1, 5)
    with open('ibm_part1.pkl', 'wb') as file:
        pickle.dump(ibm_part1, file)

    print('Creating ibm part 2')
    ibm_part2 = IBMModel1(bitext_part2, 5)
    with open('ibm_part2.pkl', 'wb') as file:
        pickle.dump(ibm_part2, file)

    print('Creating ibm part 3')
    ibm_part3 = IBMModel1(bitext_part3, 5)
    with open('ibm_part3.pkl', 'wb') as file:
        pickle.dump(ibm_part3, file)

    print('Creating ibm part 4')
    ibm_part4 = IBMModel1(bitext_part4, 5)
Code example #10
    # Structures: 1-paragraph alignment only, 2-sentence alignment based on paragraphs, 3-direct sentence alignment
    structures = {1: "para", 2: "psent", 3: "sent"}
    struct_num = 1

    for i in range(1, 44):
        en_path = 'translation-dashboard/data/en-ba-' + structures[
            struct_num] + '-align/en-chapter-' + str(i) + '.txt'
        ba_path = 'translation-dashboard/data/en-ba-' + structures[
            struct_num] + '-align/ba-chapter-' + str(i) + '.txt'
        aligned_paras.extend(para_as_sent(en_path, ba_path))
        wc += word_count(en_path)
    # print (wc.freq("i"))

    num_iterations = 20
    start = timer()
    model = IBMModel1(aligned_paras, num_iterations)
    end = timer()
    timeelapsed = end - start  # timer will only evaluate time taken to run IBM Models

    with open('align_models/ibm-model-runtimes.csv', 'a',
              encoding='utf-8') as output_file:
        output_writer = csv.writer(output_file, delimiter='\t')
        output_writer.writerow([
            "1",
            str(num_iterations), timeelapsed,
            socket.gethostname(), 'struct' + str(struct_num)
        ])
    # (the with-statement closes output_file automatically)

    # Save model and word count
    with open('align_models/ibm1.model', 'wb') as m_file:
Code example #11
print(" ")
# In[266]:

#TRAINING IBM MODEL 1
from collections import defaultdict
from nltk.translate import AlignedSent
from nltk.translate import Alignment
from nltk.translate import IBMModel, IBMModel1, IBMModel2
from nltk.translate.ibm_model import Counts
#bitext will have the parallel corpus
bitext = []
for i in range(len(fr)):
    bitext.append(AlignedSent(en[i], fr[i]))
#Training for 1000 iterations
ibm1 = IBMModel1(bitext, 1000)
#trans_dict will contain the translation probabilities for each distinct pair of words
#pair being of the form (english_word,french_word)
trans_dict = ibm1.translation_table

# In[267]:

#ALIGNMENTS OF IBM MODEL 1

print("IBM MODEL 1")
for i in range(len(fr)):
    test_sentence = bitext[i]
    align_ibm = test_sentence.alignment
    #print(test_sentence)
    print(align_ibm)
    #print(" ")
Code example #12
def generateModels(qtype):
	# dictionary, pwC, pdf = prepare_corpus("data/linkSO",recompute=False)
	datadir = "data/linkSO"
	all_questions = pd.read_csv(join(datadir, "linkso/topublish/" + qtype + "/" + qtype + "_qid2all.txt"), sep='\t', \
								names=['qID', 'qHeader', 'qDescription', 'topVotedAnswer', 'type'])
	similar_docs_file = pd.read_csv(join(datadir, "linkso/topublish/" + qtype + "/" + qtype + "_cosidf.txt"), sep='\t', \
									names=['qID1', 'qID2', 'score', 'label'], skiprows=1)
	filtered_rows = similar_docs_file[similar_docs_file['label'] == 1]
	filtered_columns = filtered_rows.filter(items=['qID1', 'qID2'])
	bitext_qH_qH = []
	bitext_qD_qD = []
	bitext_qHqD_qHqD = []
	loop_counter = 0
	for each_row in filtered_columns.itertuples():
		q1ID = each_row[1]
		q2ID = each_row[2]
		q1_row = all_questions.loc[all_questions['qID'] == q1ID]
		q1header = str(q1_row['qHeader'].values[0]).split()
		q1desc = str(q1_row['qDescription'].values[0]).split()
		q1ans = str(q1_row['topVotedAnswer'].values[0]).split()
		q2_row = all_questions.loc[all_questions['qID'] == q2ID]
		q2header = str(q2_row['qHeader'].values[0]).split()
		q2desc = str(q2_row['qDescription'].values[0]).split()
		q2ans = str(q2_row['topVotedAnswer'].values[0]).split()
		# print("\nQ1 Header:", q1header)
		# print("Q1 Desc:", q1desc)
		# print("Q1 Answer:", q1ans)
		# print("Q2:", q2header)
		# print("Q2 Desc:", q2desc)
		# print("Q2 Answer:", q2ans)
		bitext_qH_qH.append(AlignedSent(q1header, q2header))
		bitext_qD_qD.append(AlignedSent(q1desc, q2desc))
		bitext_qHqD_qHqD.append(AlignedSent(q1header + q1desc, q2header + q2desc))
		loop_counter += 1

	# Model 1
	print("Training Model1 QH QH..")
	start = time.time()
	ibmQH = IBMModel1(bitext_qH_qH, 50)
	print("Model QH QH trained.. In", time.time() - start, " seconds..")
	with open('modelQHQH_Model1_' + qtype + '.pk', 'wb') as fout:
		pickle.dump(ibmQH, fout)

	print("Training Model1 QD QD..")
	start = time.time()
	ibmQD = IBMModel1(bitext_qD_qD, 50)
	print("Model QD QD trained.. In", time.time() - start, " seconds..")
	with open('modelQDQD_Model1_' + qtype + '.pk', 'wb') as fout:
		pickle.dump(ibmQD, fout)

	print("Training Model1 QHQD QHQD..")
	start = time.time()
	ibmQHQD = IBMModel1(bitext_qHqD_qHqD, 50)
	print("Model QH QH trained.. In", time.time() - start, " seconds..")
	with open('modelQHQD_Model1_' + qtype + '.pk', 'wb') as fout:
		pickle.dump(ibmQHQD, fout)

	print(round(ibmQH.translation_table['html']['web'], 10))
	print(round(ibmQD.translation_table['html']['web'], 10))
	print(round(ibmQHQD.translation_table['html']['web'], 10))

	# Model 2
	print("Training Model2 QH QH..")
	start = time.time()
	ibmQH = IBMModel2(bitext_qH_qH, 50)
	print("Model QH QH trained.. In", time.time() - start, " seconds..")
	with open('modelQHQH_Model2_' + qtype + '.pk', 'wb') as fout:
		pickle.dump(ibmQH, fout)

	print("Training Model2 QD QD..")
	start = time.time()
	ibmQD = IBMModel2(bitext_qD_qD, 50)
	print("Model QD QD trained.. In", time.time() - start, " seconds..")
	with open('modelQDQD_Model2_' + qtype + '.pk', 'wb') as fout:
		pickle.dump(ibmQD, fout)

	print("Training Model2 QHQD QHQD..")
	start = time.time()
	ibmQHQD = IBMModel2(bitext_qHqD_qHqD, 50)
	print("Model QH QH trained.. In", time.time() - start, " seconds..")
	with open('modelQHQD_Model2_' + qtype + '.pk', 'wb') as fout:
		pickle.dump(ibmQHQD, fout)

	print(round(ibmQH.translation_table['html']['web'], 10))
	print(round(ibmQD.translation_table['html']['web'], 10))
	print(round(ibmQHQD.translation_table['html']['web'], 10))

	# Model 3
	print("Training Model3 QH QH..")
	start = time.time()
	ibmQH = IBMModel3(bitext_qH_qH, 50)
	print("Model QH QH trained.. In", time.time() - start, " seconds..")
	with open('modelQHQH_Model3_' + qtype + '.pk', 'wb') as fout:
		pickle.dump(ibmQH, fout)

	print("Training Model3 QD QD..")
	start = time.time()
	ibmQD = IBMModel3(bitext_qD_qD, 50)
	print("Model QD QD trained.. In", time.time() - start, " seconds..")
	with open('modelQDQD_Model3_' + qtype + '.pk', 'wb') as fout:
		pickle.dump(ibmQD, fout)

	print("Training Model3 QHQD QHQD..")
	start = time.time()
	ibmQHQD = IBMModel3(bitext_qHqD_qHqD, 50)
	print("Model QH QH trained.. In", time.time() - start, " seconds..")
	with open('modelQHQD_Model3_' + qtype + '.pk', 'wb') as fout:
		pickle.dump(ibmQHQD, fout)

	print(round(ibmQH.translation_table['html']['web'], 10))
	print(round(ibmQD.translation_table['html']['web'], 10))
	print(round(ibmQHQD.translation_table['html']['web'], 10))

	# Model 4
	print("Training Model4 QH QH..")
	start = time.time()
	ibmQH = IBMModel4(bitext_qH_qH, 50)
	print("Model QH QH trained.. In", time.time() - start, " seconds..")
	with open('modelQHQH_Model4_' + qtype + '.pk', 'wb') as fout:
		pickle.dump(ibmQH, fout)

	print("Training Model4 QD QD..")
	start = time.time()
	ibmQD = IBMModel4(bitext_qD_qD, 50)
	print("Model QD QD trained.. In", time.time() - start, " seconds..")
	with open('modelQDQD_Model4_' + qtype + '.pk', 'wb') as fout:
		pickle.dump(ibmQD, fout)

	print("Training Model4 QHQD QHQD..")
	start = time.time()
	ibmQHQD = IBMModel4(bitext_qHqD_qHqD, 50)
	print("Model QH QH trained.. In", time.time() - start, " seconds..")
	with open('modelQHQD_Model4_' + qtype + '.pk', 'wb') as fout:
		pickle.dump(ibmQHQD, fout)

	print(round(ibmQH.translation_table['html']['web'], 10))
	print(round(ibmQD.translation_table['html']['web'], 10))
	print(round(ibmQHQD.translation_table['html']['web'], 10))

	# Model5
	print("Training Model5 QH QH..")
	start = time.time()
	ibmQH = IBMModel5(bitext_qH_qH, 50)
	print("Model QH QH trained.. In", time.time() - start, " seconds..")
	with open('modelQHQH_Model5_' + qtype + '.pk', 'wb') as fout:
		pickle.dump(ibmQH, fout)

	print("Training Model5 QD QD..")
	start = time.time()
	ibmQD = IBMModel5(bitext_qD_qD, 50)
	print("Model QD QD trained.. In", time.time() - start, " seconds..")
	with open('modelQDQD_Model5_' + qtype + '.pk', 'wb') as fout:
		pickle.dump(ibmQD, fout)

	print("Training Model5 QHQD QHQD..")
	start = time.time()
	ibmQHQD = IBMModel5(bitext_qHqD_qHqD, 50)
	print("Model QH QH trained.. In", time.time() - start, " seconds..")
	with open('modelQHQD_Model5_' + qtype + '.pk', 'wb') as fout:
		pickle.dump(ibmQHQD, fout)

	print(round(ibmQH.translation_table['html']['web'], 10))
	print(round(ibmQD.translation_table['html']['web'], 10))
	print(round(ibmQHQD.translation_table['html']['web'], 10))
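The five near-identical blocks above differ only in the model class; the copy-pasted "Model QH QH trained" messages were a symptom of that duplication. A compact refactor sketch under the same assumptions (time, pickle, and the IBMModel1..IBMModel5 classes are already imported):

def train_all(bitexts, qtype, iterations=50):
	# bitexts maps a tag to its corpus, matching the original file names,
	# e.g. {'QHQH': bitext_qH_qH, 'QDQD': bitext_qD_qD, 'QHQD': bitext_qHqD_qHqD}
	models = [IBMModel1, IBMModel2, IBMModel3, IBMModel4, IBMModel5]
	for model_num, model_cls in enumerate(models, start=1):
		for tag, bitext in bitexts.items():
			start = time.time()
			model = model_cls(bitext, iterations)
			print("Model", tag, "trained.. In", time.time() - start, "seconds..")
			with open('model{}_Model{}_{}.pk'.format(tag, model_num, qtype), 'wb') as fout:
				pickle.dump(model, fout)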
Code example #13
from collections import defaultdict
from math import log
from nltk.translate import AlignedSent, IBMModel1

def main(src_path, tgt_path):
    srcs=[]
    tgts=[]
    with open(src_path) as f:
        for s in f:
            srcs.append(s.split())
    with open(tgt_path) as f:
        for t in f:
            tgts.append(t.split())

    assert len(srcs) == len(tgts)

    bitexts_s2t = []
    bitexts_t2s = []
    for s, t in zip(srcs, tgts):
        bitexts_s2t.append(AlignedSent(t, s))
        bitexts_t2s.append(AlignedSent(s, t))

    ibm1_s2t = IBMModel1(bitexts_s2t, 5)
    ibm1_t2s = IBMModel1(bitexts_t2s, 5)

    p_s_given_t = 0
    p_t_given_s = 0
    Ds = defaultdict(list)
    Dt = defaultdict(list)
    Dlen = defaultdict(list)
    Dscore = defaultdict(list)

    for key in ibm1_t2s.translation_table.keys():
        #print("sum t[t|*]=>",sum(ibm1_t2s.translation_table[key].values()))
        for key_s in ibm1_t2s.translation_table[key].keys():
            Ds[key_s].append(ibm1_t2s.translation_table[key][key_s])

    for key in Ds.keys():
        #print("sum P(*|s)=>",sum(Ds[key]))
        pass

    for key in Ds.keys():
        if key is None:
            continue
        Dlen[len(key)].append(len(Ds[key]))
        Dscore[len(key)].extend(Ds[key])

    for key in sorted(Dlen.keys(),key=lambda x:int(x)):
        #print("Dlen=>",key,sum(Dlen[key])/len(Dlen[key]))
        #print("Dscore=>",key,sum(Dscore[key])/len(Dscore[key]))
        pass

    for b in bitexts_t2s:
        tgt,src,align = b.words,b.mots,b.alignment
        for (idx_tgt, idx_src) in align:
            if idx_src is None:
                continue
            #print("t:{}->s:{}".format(tgt[idx_tgt],src[idx_src]))
            p_t_given_s +=  log(ibm1_t2s.translation_table[tgt[idx_tgt]][src[idx_src]])

    for b in bitexts_s2t:
        src,tgt,align = b.words,b.mots,b.alignment
        for (idx_src, idx_tgt) in align:
            if idx_tgt is None:
                continue
            p_s_given_t +=  log(ibm1_s2t.translation_table[src[idx_src]][tgt[idx_tgt]])

    p_s_given_t = p_s_given_t / sum(len(v) for v in tgts)
    p_t_given_s = p_t_given_s / sum(len(v) for v in srcs)

    print("log P(s|t)=> {}\nlog P(t|s)=>{}".format(p_s_given_t,p_t_given_s))