Example #1
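    # Assumed imports for this snippet (not shown in the original class;
    # adjust for tf.keras as needed):
    # import os
    # from keras.layers import Input, Lambda, Reshape
    # from keras.models import Model
    # PredictionLayer is the project's custom sampled-softmax layer.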
    def predict_sampled_softmax(self, stored_weights):
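        """Build the step-unrolled prediction model, load the stored
        weights, and keep encoder/prediction handles on the instance."""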

        pred_softmax = PredictionLayer(self.num_samples,
                                       self.vocab_size,
                                       mode='predict')

        s0 = Input(shape=(self.rnn_dim, ), name='s0')
        s = [s0]

        probs = []
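        # Unroll the decoder: one step per target position, threading the
        # recurrent state forward and collecting each step's output.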
        for t in range(self.decoder_length + 1):

            # Slice out the target token and decoder embedding for step t.
            label_t = Lambda(lambda x: x[:, t, :],
                             name='label-%s' % t)(self.labels)
            x_dec = Lambda(lambda x: x[:, t, :],
                           name='dec_embedding-%s' % t)(
                               self.dec_embedded_sequences)
            x_dec = Reshape((1, self.embedding_dim))(x_dec)

            if t == 0:
                # Seed the decoder with the encoder output as its
                # initial state instead of the s0 placeholder.
                s = [self.out_bidir_doc_encoder]

            # Advance the decoder one step from the previous state.
            s, _ = self.fwd_decoder(x_dec, initial_state=s)
            softmax_prob = pred_softmax([s, label_t])
            probs.append(softmax_prob)
            s = [s]

        prediction_model = Model(
            inputs=[self.in_document, self.in_decoder, s0, self.labels],
            outputs=probs)
        # Dummy loss: the model is only used for prediction, but Keras
        # requires a loss in order to compile.
        prediction_model.compile(loss=lambda y_true, y_pred: y_pred,
                                 optimizer='rmsprop')
        prediction_model.load_weights(
            os.path.join(self.filepath, stored_weights))
        prediction_model.summary()

        encoder_model = Model(inputs=self.in_document,
                              outputs=self.out_bidir_doc_encoder)
        self.encoder_model = encoder_model

        self.prediction_model = prediction_model
        self.pred_softmax = pred_softmax

        return self.prediction_model
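
A minimal call-site sketch (not from the original source): the instance name, weight filename, and batch shapes below are hypothetical.

    import numpy as np

    s2s = ...  # hypothetical instance of the class above
    model = s2s.predict_sampled_softmax('weights-best.h5')  # hypothetical filename

    batch = 4
    docs = np.zeros((batch, 300))                          # encoder token ids; length guessed
    dec_in = np.zeros((batch, s2s.decoder_length + 1))     # decoder token ids
    labels = np.zeros((batch, s2s.decoder_length + 1, 1))  # target ids for the sampled softmax
    s0 = np.zeros((batch, s2s.rnn_dim))                    # zero initial decoder state

    # One output per decoder step, matching the probs list above.
    step_outputs = model.predict([docs, dec_in, s0, labels])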
Example #2
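    # Assumed imports for this snippet (not shown in the original class;
    # adjust for tf.keras as needed):
    # import os
    # from keras.layers import Input, Lambda, Reshape, concatenate
    # from keras.models import Model
    # PredictionLayer is the project's custom sampled-softmax layer.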
    def predict_att_sampled_softmax(self, stored_weights):
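        """Attention variant of predict_sampled_softmax: attends over the
        encoder states at every decoder step before predicting."""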
        def one_step_attention(a, s_prev):
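            # Additive attention: score each encoder state against the
            # previous decoder state, softmax the scores into weights,
            # and return the weighted sum (context vector).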

            s_prev = self.repeat_vector_att(s_prev)
            concat = self.concatenator_att([a, s_prev])
            e = self.densor1(concat)
            energies = self.densor2(e)
            alphas = self.att_weights(energies)
            context = self.dotor([alphas, a])

            return context

        pred_softmax = PredictionLayer(self.num_samples,
                                       self.vocab_size,
                                       mode='predict')

        s0 = Input(shape=(self.rnn_dim, ), name='s0')
        s = [s0]

        probs = []
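        # Unroll the decoder: one step per target position, threading the
        # recurrent state forward and collecting each step's output.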
        for t in range(self.decoder_length + 1):

            # Slice out the target token and decoder embedding for step t.
            label_t = Lambda(lambda x: x[:, t, :],
                             name='label-%s' % t)(self.labels)
            x_dec = Lambda(lambda x: x[:, t, :],
                           name='dec_embedding-%s' % t)(self.in_dec_embedded)
            x_dec = Reshape((1, self.embedding_dim))(x_dec)
            # --- One-step attention ---
            # Compute the context vector for step t from the encoder
            # states and the previous decoder state.
            context = one_step_attention(self.out_bidir_encoder, s[0])
            context = Reshape((1, self.rnn_dim))(context)
            context_concat = concatenate([x_dec, context], axis=-1)


            # Advance the decoder one step on the [embedding; context] input.
            s, _ = self.fwd_decoder(context_concat, initial_state=s)
            softmax_prob = pred_softmax([s, label_t])
            probs.append(softmax_prob)
            s = [s]

        prediction_model = Model(
            inputs=[self.in_encoder, self.in_decoder, s0, self.labels],
            outputs=probs)
        # Dummy loss: the model is only used for prediction, but Keras
        # requires a loss in order to compile.
        prediction_model.compile(loss=lambda y_true, y_pred: y_pred,
                                 optimizer='rmsprop')
        prediction_model.load_weights(
            os.path.join(self.filepath, stored_weights))
        prediction_model.summary()

        encoder_model = Model(inputs=self.in_encoder,
                              outputs=self.out_bidir_encoder)
        self.encoder_model = encoder_model

        self.prediction_model = prediction_model
        self.pred_softmax = pred_softmax
        # Keep handles to the attention layers, fetched from the loaded
        # model by name, for later reuse.
        self.repeat_vector_att = self.prediction_model.get_layer(
            "repeator_att")
        self.concatenator_att = self.prediction_model.get_layer("concator_att")
        self.densor1 = self.prediction_model.get_layer("densor1_att")
        self.densor2 = self.prediction_model.get_layer("densor2_att")
        self.att_weights = self.prediction_model.get_layer("attention_weights")
        self.dotor = self.prediction_model.get_layer("dotor_att")

        return self.prediction_model
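
A matching sketch for the attention variant (again, the instance name, weight filename, and shapes are hypothetical):

    import numpy as np

    s2s = ...  # hypothetical instance of the class above
    model = s2s.predict_att_sampled_softmax('weights-best.h5')  # hypothetical filename

    # The stored encoder model exposes the encoder states that
    # one_step_attention attends over at each decoder step.
    docs = np.zeros((4, 300))  # encoder token ids; length guessed
    enc_states = s2s.encoder_model.predict(docs)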