    # Runs inside a keras.callbacks.Callback subclass (self.model is set by
    # Keras); requires `import time` and `import subprocess` at module level.
    def on_epoch_end(self, epoch, logs=None):  # logs=None avoids a mutable default argument
        print("On Epoch End: ")
        print("epoch: ", epoch)

        t = time.time()
        # Rebuild inference-time encoder and decoder models from the model
        # currently being trained.
        builder = Seq2SeqBuilder()
        encoder_model, decoder_model = builder.build_encoder_decoder_inference(
            self.model)
        inference = Inference(
            './dataset/image_embeddings_to_sentence/stories_to_index_' +
            self.dataset_type + '.hdf5', './dataset/vist2017_vocabulary.json',
            encoder_model, decoder_model)

        hypotheses_filename = "./results/temp/hypotheses_" + self.dataset_type + "_epoch_" + str(
            epoch) + ".txt"
        references_filename = "./results/original_" + self.dataset_type + ".txt"

        inference.predict_all(batch_size=64,
                              references_file_name='',
                              hypotheses_file_name=hypotheses_filename)

        # Calculating METEOR score. subprocess.getstatusoutput replaces the
        # Python 2-only commands module.
        status, output_meteor = subprocess.getstatusoutput(
            "java -Xmx2G -jar nlp/meteor-1.5.jar " + hypotheses_filename +
            " " + references_filename + " -t hter -l en -norm")

        # Calculating BLEU score
        status, output_bleu = subprocess.getstatusoutput(
            "perl ./nlp/multi-bleu.perl " + references_filename + " < " +
            hypotheses_filename)
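
        # If only the headline numbers are wanted, they could be parsed out of
        # the raw tool output with something like the following (a sketch;
        # assumes the usual "BLEU = ..." line printed by multi-bleu.perl, the
        # "Final score: ..." line printed by METEOR 1.5, and `import re`):
        #   bleu = float(re.search(r"BLEU = ([\d.]+)", output_bleu).group(1))
        #   meteor = float(re.search(r"Final score:\s*([\d.]+)", output_meteor).group(1))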

        with open('./results/temp/bleu_' + self.dataset_type + "_epoch_" +
                  str(epoch), "w") as text_file:
            text_file.write(output_bleu)

        with open('./results/temp/meteor_' + self.dataset_type + "_epoch_" +
                  str(epoch), "w") as text_file:
            text_file.write(output_meteor)

        print("Meteor/Bleu time(minutes) : ", (time.time() - t) / 60.0)
        print("end calculating")
    def __init__(self, dataset_file_path, vocabulary_file_path, encoder_model,
                 decoder_model):
        # Load the vocabulary JSON and the HDF5 story dataset, and keep handles
        # to the inference-time encoder/decoder models.
        with open(vocabulary_file_path) as vocab_file:
            self.vocab_json = json.load(vocab_file)
        self.num_decoder_tokens = len(self.vocab_json['idx_to_words'])
        self.words_to_idx = self.vocab_json["words_to_idx"]
        self.idx_to_words = self.vocab_json["idx_to_words"]
        self.dataset_file = h5py.File(dataset_file_path, 'r')
        self.story_ids = self.dataset_file["story_ids"]
        self.image_embeddings = self.dataset_file["image_embeddings"]
        self.story_sentences = self.dataset_file["story_sentences"]
        self.encoder_model = encoder_model
        self.decoder_model = decoder_model
        self.vocab_size = len(self.words_to_idx)

        self.num_stacked_layers = Seq2SeqBuilder().get_number_of_layers(
            encoder_model, layer_prefix="encoder_layer_")

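    # Illustrative sketch (an assumption, not shown in this excerpt): a minimal
    # greedy decoding step over the encoder/decoder pair stored above. The
    # token names ("<start>"/"<end>"), the vocabulary indexing, and the exact
    # state layout of the stacked decoder are hypothetical.
    #
    #   def greedy_decode(self, encoder_input, max_sentence_len=30):
    #       import numpy as np
    #       states = self.encoder_model.predict(encoder_input)
    #       if not isinstance(states, list):
    #           states = [states]
    #       target = np.array([[self.words_to_idx["<start>"]]])
    #       decoded_words = []
    #       for _ in range(max_sentence_len):
    #           outputs = self.decoder_model.predict([target] + states)
    #           probs, states = outputs[0], list(outputs[1:])
    #           idx = int(np.argmax(probs[0, -1, :]))
    #           word = self.idx_to_words[idx]
    #           if word == "<end>":
    #               break
    #           decoded_words.append(word)
    #           target = np.array([[idx]])  # feed the predicted token back in
    #       return " ".join(decoded_words)
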
from seq2seqbuilder import Seq2SeqBuilder, SentenceEncoderRNN, SentenceEncoderCNN
from result_visualisation import Inference
import time
from keras import backend as K
from keras.utils import plot_model

# The learning phase flag is a bool tensor (0 = test, 1 = train) to be passed as input to any Keras function that uses a different behavior at train time and test time.
K.set_learning_phase(0)
#dataset_type = "valid"
dataset_type = "review_test1"
#model_name = "2018-04-30_08:56:33-2018-04-30_20:37:12"
model_name = "2019-01-20_18-13-57-2019-01-20_19-20-14"
model_file_name = "./trained_models/" + model_name + ".h5"

# model_file_name = "trained_models/2018-01-18_17:39:24-2018-01-20_18:50:39:image_to_text_gru.h5"
builder = Seq2SeqBuilder()
sentence_encoder = SentenceEncoderRNN()
encoder_model, decoder_model = builder.build_encoder_decoder_inference_from_file(model_file_name, sentence_encoder,
                                                                                 include_sentence_encoder=True,
                                                                                 attention=True)
# plot_model(encoder_model, to_file='encomodel.png',show_shapes=True)
# plot_model(decoder_model, to_file='decomodel.png',show_shapes=True)
inference = Inference('./dataset/image_embeddings_to_sentence/stories_to_index_' + dataset_type + '.hdf5',
                      './dataset/vist2017_vocabulary.json', encoder_model, decoder_model)
t = time.time()
# inference.predict_all(batch_size=64, references_file_name='',
#                       hypotheses_file_name="./results/"+ model_name +"/hypotheses_" +dataset_type + ".txt")
# beam_size = 10
# inference.predict_all_beam_search(batch_size=600, beam_size=beam_size, hypotheses_file_name="./results/"+ model_name +"/hypotheses_" +dataset_type + "_beam"+str(beam_size)+".txt")
inference.predict_all(batch_size=50, references_file_name='',
                      hypotheses_file_name="./results/hypotheses_" + dataset_type + ".txt")
print("Inference time (minutes): ", (time.time() - t) / 60.0)