Example #1
def main():

    # load trained model
    print('Loading neural machine translator with attention:')
    train_set = Corpus(TRAIN_SRC, TRAIN_TGT)
    dev_set = Corpus(DEV_SRC, DEV_TGT)
    m = load_model(train_set, dev_set, MODEL)
    print('Model loaded!')

    # dev_set.target_sentences = dev_set.target_sentences[100:200]
    # dev_set.source_sentences = dev_set.source_sentences[100:200]

    # translate sentence
    print('\nTranslating . . .\n')
    sample_output = np.random.choice(len(dev_set.target_sentences), 5, replace=False)
    greedy, beam = gen_all(m, dev_set.source_sentences, BSIZE)
    for sample in sample_output:
        print('Target: {}'.format(' '.join(dev_set.target_sentences[sample])))
        print('Greedy: {}'.format(' '.join(greedy[sample])))
        print('Beam search: {}'.format(' '.join(beam[sample])))
        print('----------')

    greedy_score = get_bleu_score(greedy, dev_set.target_sentences)
    beam_score = get_bleu_score(beam, dev_set.target_sentences)

    print('Greedy bleu score: ', greedy_score)
    print('Beam search bleu score: ', beam_score)
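Example #1 (and Example #3 below) relies on a get_bleu_score helper that is not defined in the snippet. A minimal sketch, assuming both arguments are parallel lists of token lists (one hypothesis and one reference per sentence) and that NLTK's corpus-level BLEU with smoothing is an acceptable stand-in:

# Hypothetical get_bleu_score: corpus-level BLEU via NLTK, assuming
# `hypotheses` and `references` are parallel lists of token lists.
from nltk.translate.bleu_score import corpus_bleu, SmoothingFunction

def get_bleu_score(hypotheses, references):
    # corpus_bleu expects a list of reference lists per hypothesis,
    # so each single reference is wrapped in its own list.
    wrapped_references = [[ref] for ref in references]
    smoothing = SmoothingFunction().method1
    return corpus_bleu(wrapped_references, hypotheses,
                       smoothing_function=smoothing)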
Example #2
def evaluateAll(encoder, decoder, split='dev', use_attention=True):
    if use_attention:
        model = 'attention'
    else:
        model = 'simple'

    if split == 'train':
        eval_pairs = train_pairs
    elif split == 'dev':
        eval_pairs = dev_pairs
    else:
        raise ValueError("split must be 'train' or 'dev', got %r" % split)

    translated_sentences = []
    target_sentences = []

    f_out = open('%s_%s_out.txt' % (split, model), 'w')
    for i in range(len(eval_pairs)):
        pair = eval_pairs[i]
        f_out.write('> '+pair[0]+'\n')
        f_out.write('= '+pair[1]+'\n')
        target_sentences.append(pair[1])
        #print('>', pair[0])
        #print('=', pair[1])
        output_words, attentions = evaluate(encoder, decoder, pair[0], use_attention=use_attention)
        output_sentence = ' '.join(output_words)
        f_out.write('< '+output_sentence+'\n\n')
        translated_sentences.append(output_sentence)
        #print('<', output_sentence)
        #print('')
    f_out.close()

    bleu_score = get_bleu_score(translated_sentences, target_sentences)
    print('BLEU score: ', bleu_score)
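A hypothetical call for the function above; with split='dev' and use_attention=True it writes the source/reference/output triples to dev_attention_out.txt and prints the corpus BLEU score:

# Hypothetical usage, assuming a trained encoder/decoder pair is in scope.
evaluateAll(encoder, decoder, split='dev', use_attention=True)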
Example #3
def main(train_src_file, train_tgt_file, dev_src_file, dev_tgt_file,
         model_file, num_epochs, embeddings_init=None, seed=0):
    print('reading train corpus ...')
    train_set = Corpus(train_src_file, train_tgt_file)
    # assert()
    print('reading dev corpus ...')
    dev_set = Corpus(dev_src_file, dev_tgt_file)

    # test_set = Corpus(test_src_file)

    print('Initializing neural machine translator with attention:')
    # src_vocab_size, tgt_vocab_size, tgt_idx2word, word_d, gru_d, gru_layers
    encoder_decoder = nmt_dynet_attention(
        len(train_set.source_word2idx), len(train_set.target_word2idx),
        train_set.source_word2idx, train_set.source_idx2word,
        train_set.target_word2idx, train_set.target_idx2word,
        50, 50, 2)

    trainer = SimpleSGDTrainer(encoder_decoder.model)

    sample_output = np.random.choice(len(dev_set.target_sentences), 5, replace=False)
    losses = []
    best_bleu_score = 0
    for epoch in range(num_epochs):
        print('Starting epoch', epoch)
        # shuffle the training data
        combined = list(zip(train_set.source_sentences, train_set.target_sentences))
        random.shuffle(combined)
        train_set.source_sentences[:], train_set.target_sentences[:] = zip(*combined)

        print('Training . . .')
        sentences_processed = 0
        for src_sentence, tgt_sentence in zip(train_set.source_sentences, train_set.target_sentences):
            loss = encoder_decoder.get_loss(src_sentence, tgt_sentence)
            loss_value = loss.value()
            loss.backward()
            trainer.update()
            sentences_processed += 1
            if sentences_processed % 4000 == 0:
                print('sentences processed: ', sentences_processed)

        # Track the validation loss after each epoch (used for plotting)
        val_loss = get_val_set_loss(encoder_decoder, dev_set)
        print('Validation loss this epoch:', val_loss)
        losses.append(val_loss)

        print('Translating . . .')
        translated_sentences = encoder_decoder.translate_all(dev_set.source_sentences)

        print('Showing {} sample translations:'.format(len(sample_output)))
        for sample in sample_output:
            print('Target: {}\nTranslation: {}\n'.format(
                ' '.join(dev_set.target_sentences[sample]),
                ' '.join(translated_sentences[sample])))

        bleu_score = get_bleu_score(translated_sentences, dev_set.target_sentences)
        print('bleu score: ', bleu_score)
        if bleu_score > best_bleu_score:
            best_bleu_score = bleu_score
            # save the model
            encoder_decoder.save(model_file)

    print('best bleu score: ', best_bleu_score)
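Example #3 also calls get_val_set_loss, which is not shown. A minimal sketch, assuming it simply sums the per-sentence loss that encoder_decoder.get_loss already provides over the whole dev corpus:

# Hypothetical get_val_set_loss: total loss over the dev set, reusing the
# same get_loss API as the training loop above.
def get_val_set_loss(encoder_decoder, dev_set):
    total_loss = 0.0
    for src_sentence, tgt_sentence in zip(dev_set.source_sentences,
                                          dev_set.target_sentences):
        total_loss += encoder_decoder.get_loss(src_sentence, tgt_sentence).value()
    return total_loss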
Example #4
def evaluate():
    # Load and check whether UNK, SOS, EOS appears in the vocabulary.
    source_vocab, source_vocab_length = load_and_check_vocab(SOURCE_VOCAB_FILE)
    target_vocab, target_vocab_length = load_and_check_vocab(TARGET_VOCAB_FILE)

    # Get UNK, SOS, EOS id.
    source_unk_id = source_vocab.index(UNK)
    source_eos_id = source_vocab.index(EOS)
    target_unk_id = target_vocab.index(UNK)
    target_sos_id = target_vocab.index(SOS)
    target_eos_id = target_vocab.index(EOS)

    # Compute the length of effective training data.
    source_bool_mask = check_dataset(SOURCE_TRAINING_FILE)
    target_bool_mask = check_dataset(TARGET_TRAINING_FILE)
    bool_mask = [x and y for x, y in zip(source_bool_mask, target_bool_mask)]
    dataset_length = sum(bool_mask)

    # Create source and target vocabulary tables.
    source_vocab_table = tf.contrib.lookup.index_table_from_file(
        DATA_DIR + SOURCE_VOCAB_FILE, default_value=source_unk_id)
    target_vocab_table = tf.contrib.lookup.index_table_from_file(
        DATA_DIR + TARGET_VOCAB_FILE, default_value=target_unk_id)

    # Get dataset iterator tuple(initializer, source_input, target_input, target_output, source_length, target_length).
    iterator = get_dataset_iterator(dataset_length, SOURCE_TEST_FILE_1,
                                    TARGET_TEST_FILE_1, source_vocab_table,
                                    target_vocab_table, source_eos_id,
                                    target_sos_id, target_eos_id)

    # Load model.
    model = Seq2Seq_Model()
    model.build_evaluation_model(source_vocab_length, target_vocab_length,
                                 target_sos_id, target_eos_id,
                                 iterator.source_input, iterator.target_output,
                                 iterator.source_length)

    iterator_initializer = iterator.initializer
    table_initializer = tf.tables_initializer()

    with tf.Session() as sess:
        # Initialization.
        sess.run([iterator_initializer, table_initializer])

        # Load parameters.
        saver_embedding = tf.train.Saver(model.embedding)
        saver_network = tf.train.Saver(model.network_params)
        saver_embedding.restore(sess, SAVE_DIR + "embedding")
        saver_network.restore(sess, SAVE_DIR + "network_params")

        list_translation = []
        list_reference = []

        total_step = int(np.ceil(dataset_length / BUCKET_BATCH_SIZE))
        for step in range(total_step):
            translation_ids, reference_ids = sess.run(
                [model.translation_ids, model.reference_ids])

            # Shorten the sequence by removing the paddings.
            translation_ids = shorten_sequence(translation_ids,
                                               target_eos_id,
                                               dtype=np.int32)
            reference_ids = shorten_sequence(reference_ids,
                                             target_eos_id,
                                             dtype=np.int32)

            list_translation += translation_ids
            list_reference += [[reference] for reference in reference_ids]
            print("Current progress: ",
                  step + 1,
                  "/",
                  total_step,
                  sep="",
                  end="\r",
                  flush=True)

        # Estimate the BLEU score.
        score = get_bleu_score(list_translation, list_reference)

        print("")
        print("BLEU score = ", format(score, ".8f"), sep="")