    def build_decoder(self):
        embedding_layer = neural_network.Embedding_Layer(
            [self.config.vocab_size, self.config.num_units])
        decoder_emb_inputs = embedding_layer.lookup(self.tgt_inputs)

        rnn_graph = neural_network.RNN_Graph(
            [self.config.hidden_size, self.config.num_layers], self.training,
            self.config.keep_prob, self.config.batch_size)
        self.decoder_initial_state = self.encoder_final_state

        helper = tf.contrib.seq2seq.TrainingHelper(
            decoder_emb_inputs,
            [self.config.tgt_max_sent_length - 1] * self.config.batch_size,
            time_major=True)
        # layers_core: from tensorflow.python.layers import core as layers_core
        projection_layer = layers_core.Dense(self.config.tgt_vocab_size,
                                             use_bias=False,
                                             name="output_projection")
        decoder = tf.contrib.seq2seq.BasicDecoder(
            rnn_graph.model_cell,
            helper,
            self.decoder_initial_state,
            output_layer=projection_layer)
        outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(
            decoder, output_time_major=True, swap_memory=True)
        self.logits = outputs.rnn_output
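
A minimal inference-time counterpart, sketched under the assumption that the embedding layer, decoder cell, and projection layer built above are kept on self (the attribute names below are hypothetical), and that sos_id/eos_id exist in the config. GreedyEmbeddingHelper replaces TrainingHelper so the decoder feeds its own predictions back in:

    def build_inference_decoder(self):
        # Greedy decoding: feed back the argmax token at each step.
        helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
            self.embedding,  # hypothetical handle to the embedding variable
            start_tokens=tf.fill([self.config.batch_size], self.config.sos_id),
            end_token=self.config.eos_id)
        decoder = tf.contrib.seq2seq.BasicDecoder(
            self.decoder_cell,  # hypothetical handle to rnn_graph.model_cell
            helper,
            self.decoder_initial_state,
            output_layer=self.projection_layer)  # hypothetical stored layer
        outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(
            decoder,
            maximum_iterations=self.config.tgt_max_sent_length,
            output_time_major=True,
            swap_memory=True)
        self.sample_ids = outputs.sample_id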
Example #2
    def build(self):
        embedding_layer = neural_network.Embedding_Layer(
            [self.config.vocab_size, self.config.hidden_size])
        inputs = embedding_layer.lookup(self.inputs)

        if self.training and self.config.keep_prob < 1:
            inputs = tf.nn.dropout(inputs, self.config.keep_prob)

        rnn_graph = neural_network.RNN_Graph(
            [self.config.hidden_size, self.config.num_layers], self.training,
            self.config.keep_prob, self.config.batch_size)
        self.initial_state = rnn_graph.initial_state
        output, state = rnn_graph.feed_forward(inputs, self.config)

        softmax_layer = neural_network.Softmax_Layer(
            [self.config.hidden_size, self.config.vocab_size])
        self.logits = softmax_layer.feed_forward(output)
        # sequence_loss expects logits shaped [batch_size, num_steps, vocab_size].
        self.logits = tf.reshape(self.logits, [
            self.config.batch_size, self.config.num_steps,
            self.config.vocab_size
        ])
        loss = tf.contrib.seq2seq.sequence_loss(
            self.logits,
            self.labels,
            tf.ones([self.config.batch_size, self.config.num_steps],
                    dtype=tf.float32),
            average_across_timesteps=False,
            average_across_batch=True)
        self.loss = tf.reduce_sum(loss)
        self.final_state = state
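
The summed, batch-averaged loss above maps directly onto perplexity. A minimal training-step sketch, assuming a built model instance, an open session, and a hypothetical batches iterator; final_state is fed back as the next batch's initial_state so state carries across truncated-BPTT windows:

import numpy as np

state = session.run(model.initial_state)
total_loss, total_steps = 0.0, 0
for x, y in batches:  # hypothetical (inputs, labels) iterator
    loss, state = session.run(
        [model.loss, model.final_state],
        feed_dict={model.inputs: x,
                   model.labels: y,
                   model.initial_state: state})
    total_loss += loss
    total_steps += model.config.num_steps
print("perplexity:", np.exp(total_loss / total_steps))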
Example #3
    def build(self):
        e_layer = neural_network.Embedding_Layer(
            [config.VOCABULARY_SIZE, config.EMBEDDING_SIZE])
        embeddings = e_layer.lookup(self.inputs)
        self.embeddings = e_layer.embedding

        nce_layer = neural_network.NCE_Layer(
            [config.VOCABULARY_SIZE, config.EMBEDDING_SIZE])
        self.loss = nce_layer.loss(embeddings, self.labels)
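
NCE_Layer itself is not shown in these examples. A plausible sketch of what its loss could wrap, using tf.nn.nce_loss to score each true word against a handful of sampled negatives; NUM_SAMPLED is an assumed config constant, and labels are taken to be [batch_size, 1] target ids:

class NCE_Layer(object):
    def __init__(self, shape):
        vocab_size, embedding_size = shape
        self.vocab_size = vocab_size
        self.weights = tf.Variable(tf.truncated_normal(
            [vocab_size, embedding_size], stddev=embedding_size ** -0.5))
        self.biases = tf.Variable(tf.zeros([vocab_size]))

    def loss(self, embeddings, labels):
        # Noise-contrastive estimation: score the true word against
        # NUM_SAMPLED random words instead of the full vocabulary.
        return tf.reduce_mean(tf.nn.nce_loss(
            weights=self.weights,
            biases=self.biases,
            labels=labels,
            inputs=embeddings,
            num_sampled=config.NUM_SAMPLED,  # assumed config constant
            num_classes=self.vocab_size))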
Example #4
    def build_encoder(self):
        embedding_layer = neural_network.Embedding_Layer(
            [self.config.vocab_size, self.config.num_units])
        encoder_emb_inputs = embedding_layer.lookup(self.src_inputs)

        rnn_graph = neural_network.RNN_Graph(
            [self.config.hidden_size, self.config.num_layers], self.training,
            self.config.keep_prob, self.config.batch_size)
        self.encoder_initial_state = rnn_graph.initial_state
        output, state = rnn_graph.encoder_feed_forward(encoder_emb_inputs,
                                                       self.config)
        self.encoder_final_state = state
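
A sketch of how the encoder and decoder halves compose, assuming a top-level build(); the only handoff between them is encoder_final_state, so the two RNN_Graphs must share hidden_size and num_layers for the state shapes to line up:

    def build(self):
        self.build_encoder()  # sets self.encoder_final_state
        self.build_decoder()  # uses it as self.decoder_initial_state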
Example #5
    def build(self):
        self.e_layer = neural_network.Embedding_Layer(
            [self.vocab_size, config.EMBEDDING_SIZE])
        inputs = self.e_layer.lookup(self.inputs)

        rnn_graph = neural_network.RNN_Graph(
            [self.config.HIDDEN_SIZE, self.config.NUM_LAYERS], self.training,
            self.config.keep_prob, self.config.BATCH_SIZE)
        self.initial_state = rnn_graph.initial_state
        output, state = rnn_graph.feed_forward(inputs)

        output = tf.reshape(output[:, -1, :], [-1, self.config.HIDDEN_SIZE])
        fc_layer = neural_network.Softmax_Layer(
            [self.config.HIDDEN_SIZE, self.config.NUM_CLASS])
        self.logits = fc_layer.feed_forward(output)

        # Cross-entropy: sum over the class axis, average over the batch.
        self.loss = tf.reduce_mean(
            -tf.reduce_sum(self.labels * tf.log(self.logits), axis=1))
        self.final_state = state
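
Taking tf.log of softmax probabilities can underflow to -inf on confident wrong predictions. A more stable variant works on pre-softmax activations, sketched here under the assumption that the layer can expose them (the logits attribute below is hypothetical):

        # Hypothetical: fc_layer.logits holds the pre-softmax activations.
        self.loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits_v2(
                labels=self.labels, logits=fc_layer.logits))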
Example #6
import config
import utils
import os
import numpy
import tensorflow as tf
import data
import neural_network

#%%
# =============================================================================
# data.py test
# =============================================================================
word_to_index, index_to_word, word_to_vec, emb_matrix = utils.read_glove_vecs(
    os.path.join(config.EMBEDDING_DIR, config.EMBEDDING_PATH))
dataset = data.DATA()
dataset.read_file(config.TRAIN_PATH, word_to_index)

#%%
# =============================================================================
# Embedding_Layer() test
# =============================================================================
inputs = tf.placeholder(shape=[1, 1], dtype=tf.int32)
word_to_index, index_to_word, word_to_vec, emb_matrix = utils.read_glove_vecs(
    os.path.join(config.EMBEDDING_DIR, config.EMBEDDING_PATH))
e_layer = neural_network.Embedding_Layer(shape=emb_matrix.shape)
with tf.Session() as session:
    init = tf.global_variables_initializer()
    session.run(init)
    e_layer.set_embedding(emb_matrix, session)
    # Read the embedding back out to verify it was loaded.
    see = session.run(e_layer.embedding)

#%%
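# =============================================================================
# Embedding_Layer().lookup() test (sketch)
# =============================================================================
# A minimal lookup check; assumes "the" appears in word_to_index, but any
# in-vocabulary word works.
lookup_op = e_layer.lookup(inputs)
with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    e_layer.set_embedding(emb_matrix, session)
    vec = session.run(lookup_op,
                      feed_dict={inputs: [[word_to_index["the"]]]})
    print(vec.shape)  # expected: (1, 1, embedding_size)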