    def _rnn(self, x):
        word_idx = tf.cast(x, tf.int32)
        init_embedding = tf.constant(WordVec.word_vecs)
        with tf.variable_scope('Embeddings'):
            embeddings = tf.get_variable('embeddings',
                                         initializer=init_embedding)
        word_vec = nn.embedding_lookup(embeddings, word_idx)
        # static_rnn consumes a Python list with one [batch, embed_dim] tensor per time step
        inputs = tf.unstack(word_vec, axis=1)

        lstm_cell = rnn.LSTMCell(self.lstm_units,
                                 use_peepholes=True,
                                 forget_bias=1.0,
                                 initializer=self.init)

        if self.mode == 'train':
            keep_prob = self.keep_prob
        else:
            keep_prob = 1.0

        lstm_cell = nn.rnn_cell.DropoutWrapper(lstm_cell,
                                               input_keep_prob=keep_prob,
                                               output_keep_prob=keep_prob)

        outputs, states = rnn.static_rnn(lstm_cell, inputs, dtype=tf.float32)

        return outputs
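The tf.unstack call is only needed because static_rnn consumes one tensor per time step. For comparison, here is a minimal standalone TF 1.x sketch of the same embedding-plus-LSTM pipeline using tf.nn.dynamic_rnn, which accepts the batched [batch, time, dim] tensor directly; the vocabulary size, embedding width, unit count, and sequence length below are made-up placeholders, not values from the snippet above.

import tensorflow as tf

# Placeholder sizes for illustration only (assumed, not from the original).
VOCAB_SIZE, EMBED_DIM, LSTM_UNITS, MAX_LEN = 10000, 300, 128, 20

word_idx = tf.placeholder(tf.int32, shape=[None, MAX_LEN])
embeddings = tf.get_variable('embeddings_sketch', shape=[VOCAB_SIZE, EMBED_DIM])
word_vec = tf.nn.embedding_lookup(embeddings, word_idx)   # [batch, time, EMBED_DIM]

lstm_cell = tf.nn.rnn_cell.LSTMCell(LSTM_UNITS, use_peepholes=True, forget_bias=1.0)
# dynamic_rnn takes the batched tensor as-is; no tf.unstack needed.
outputs, state = tf.nn.dynamic_rnn(lstm_cell, word_vec, dtype=tf.float32)
# outputs: [batch, time, LSTM_UNITS]; static_rnn instead returns a Python
# list of per-step [batch, LSTM_UNITS] tensors.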
    def _get_word_embed(self, h):
        word_logit = matmul(h, self.output_w) + self.bhid
        # greedy (argmax) choice vs. sampling from the softmax distribution
        word_chosen1 = tf.argmax(word_logit, 1)
        word_chosen2 = tf.multinomial(word_logit, 1)
        word_chosen2 = tf.squeeze(word_chosen2, axis=1)
        word_chosen = tf.cond(self.if_argmax, lambda: word_chosen1,
                              lambda: word_chosen2)
        return embedding_lookup(self.embeddings, word_chosen)
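The helper above switches between greedy (argmax) and stochastic (multinomial) word selection with tf.cond. A small standalone TF 1.x sketch of just that switch, with made-up logits and a hypothetical use_argmax placeholder standing in for self.if_argmax:

import tensorflow as tf

logits = tf.constant([[2.0, 0.5, 0.1]])          # one "sentence", three candidate words
use_argmax = tf.placeholder(tf.bool, shape=[])   # stands in for self.if_argmax

greedy = tf.argmax(logits, 1)                            # deterministic: highest logit
sampled = tf.squeeze(tf.multinomial(logits, 1), axis=1)  # draw from softmax(logits)
chosen = tf.cond(use_argmax, lambda: greedy, lambda: sampled)

with tf.Session() as sess:
    print(sess.run(chosen, {use_argmax: True}))    # always [0]
    print(sess.run(chosen, {use_argmax: False}))   # stochastic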
Example #3
    def _create_embedding_layers(self):
        """pass"""
        embedding = []

        with tf.device("/cpu:0"), tf.variable_scope("embedding_layer"):
            self.token_embedding_layer = tf.get_variable("token_embedding",
                                                         shape=[self.num_token, self.token_dim],
                                                         initializer=tc.layers.xavier_initializer())
            embedding.append(nn.embedding_lookup(self.token_embedding_layer,
                                                 self.input_ids))

            if self.use_seg_feature:
                self.seg_embedding_layer = tf.get_variable("seg_embedding",
                                                           shape=[4, self.seg_dim],
                                                           initializer=tc.layers.xavier_initializer())
                embedding.append(nn.embedding_lookup(self.seg_embedding_layer,
                                                     self.seg_ids))

            embedding = tf.concat(embedding, axis=-1)

            assert embedding.shape[-1].value == self.embedding_dim, \
                f"concatenated embedding shape {embedding.shape} error"

            self.embedding = nn.dropout(embedding, self.dropout)
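For reference, a standalone TF 1.x sketch of the concatenation this layer performs: a token embedding and a smaller segmentation embedding are looked up per position and joined along the last axis. The table sizes (100 tokens x 16 dims, 4 segment tags x 8 dims) are placeholders, not the original hyper-parameters.

import tensorflow as tf

token_ids = tf.placeholder(tf.int32, shape=[None, None])   # [batch, seq_len]
seg_ids = tf.placeholder(tf.int32, shape=[None, None])     # [batch, seq_len]

token_table = tf.get_variable('token_embedding_sketch', shape=[100, 16])
seg_table = tf.get_variable('seg_embedding_sketch', shape=[4, 8])

token_emb = tf.nn.embedding_lookup(token_table, token_ids)  # [batch, seq_len, 16]
seg_emb = tf.nn.embedding_lookup(seg_table, seg_ids)        # [batch, seq_len, 8]
combined = tf.concat([token_emb, seg_emb], axis=-1)         # [batch, seq_len, 24]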
    def _construct_model(self):
        n_steps = tf.shape(self.words)[0]
        n_x, n_h, n_v = (self.options['n_x'], self.options['n_h'],
                         self.options['n_v'])
        batch_size = tf.shape(self.y)[0]
        words_embed = embedding_lookup(self.embeddings, self.words)
        # n_steps, batch_size, embed_dim
        words_embed = dropout(words_embed, self.keep_prob,
                              (1, batch_size, n_x))
        # convert video feature from None * n_z to None * n_x
        # vid_feat_proj shape: (1, batch_size, embed_dim)
        vid_feat_proj = expand_dims(matmul(self.z, self.c0), 0)
        # use video feature as the input for the first step
        # state_below shape: (n_steps, batch_size, embed_dim)
        state_below = concat([vid_feat_proj, words_embed[:-1]], 0)

        # h_list shape: (n_steps, batch_size, h_dims)
        self.h_list = self._decoder_layer(state_below)
        self.h_list_reshape = reshape(self.h_list, [-1, n_h])
        # logits shape: (n_steps*batch_size, n_vocabulary)
        self.logits = matmul(self.h_list_reshape, self.output_w) + self.bhid
        logits_reshaped = reshape(self.logits, [-1, batch_size, n_v])
        self.sents = tf.argmax(logits_reshaped, -1)
        # weighted_mask shape: (n_steps, batch_size)
        weighted_mask = self.mask / (tf.reduce_sum(self.mask, 0, keepdims=True)
                                     **0.7)
        self.loss = sparse_softmax_cross_entropy(
            logits=logits_reshaped + 1e-8,
            labels=self.words,
            weights=weighted_mask,
            reduction=tf.losses.Reduction.SUM)
        self.train_loss = self.loss / tf.cast(batch_size, tf.float32)

        test_h_list = self._test_layer(matmul(self.z, self.c0))
        test_h_list = reshape(test_h_list, (-1, n_h))
        test_logits = matmul(test_h_list, self.output_w) + self.bhid
        test_logits = reshape(test_logits, (-1, batch_size, n_v))
        self.test_sents = tf.argmax(test_logits, -1)
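The decoder input state_below above is built by prepending the projected video feature and dropping the last word embedding, so step t sees only the words up to t-1. A tiny standalone sketch of that shift with made-up shapes (5 steps, batch of 2, embedding width 3):

import tensorflow as tf

n_steps, batch_size, n_x = 5, 2, 3
words_embed = tf.ones([n_steps, batch_size, n_x])     # stand-in word embeddings
vid_feat_proj = tf.zeros([1, batch_size, n_x])        # stand-in projected video feature

# Prepend the video feature and drop the last word embedding: still n_steps long.
state_below = tf.concat([vid_feat_proj, words_embed[:-1]], 0)

with tf.Session() as sess:
    print(sess.run(tf.shape(state_below)))  # [5 2 3]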
import tensorflow as tf
import numpy as np
from tensorflow.nn import embedding_lookup

tf.enable_eager_execution()

sources = ['isso vai funcionar?']

vocab_sources = [word for sentence in sources for word in sentence.split()]

word2idx_vocab = {word: i for i, word in enumerate(vocab_sources)}

print(word2idx_vocab)

encoder_inputs = np.zeros((len(sources[0].split()), 1))

print(encoder_inputs)
embedding = tf.get_variable("embedding_encoder", [len(vocab_sources), 2])
print(embedding)
print(embedding_lookup(embedding, [0, 1, 2, 0]))  # indices must lie in [0, len(vocab_sources))
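With a single (non-partitioned) parameter tensor, embedding_lookup simply gathers rows by index, equivalent to tf.gather along axis 0. A standalone eager-mode sketch (run it as its own TF 1.x script, since tf.enable_eager_execution must be called before any other ops); the toy table values are made up:

import numpy as np
import tensorflow as tf

tf.enable_eager_execution()

# Toy table: 4 vocabulary entries, 2 dimensions each.
table = tf.constant(np.arange(8, dtype=np.float32).reshape(4, 2))
ids = [0, 0, 2, 3]

print(tf.nn.embedding_lookup(table, ids).numpy())
print(tf.gather(table, ids).numpy())   # identical rows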
Example #6
    def embedding_layer(self, sequence):
        return embedding_lookup(self.embedding_matrix,
                                sequence)  # (batch_size, time_step, emb_dim)
Example #7
def build_preprocess(inputs, embedding, training):
    cell_input = nn.embedding_lookup(embedding, inputs)
    if training:
        # nn.dropout's second positional argument is the keep probability in TF 1.x
        cell_input = nn.dropout(cell_input, DROPOUT)
    return cell_input
Example #8
    def __init__(self, **kwargs):
        '''The following arguments are accepted:

        Parameters
        ----------
        vocab_size  :   int
                        Size of the vocabulary for creating embeddings
        embedding_size  :   int
                            Dimensionality of the embedding space
        memory_size :   int
                        LSTM memory size
        keep_prob   :   float
                        Keep probability (1 - dropout rate) for embedding and LSTM dropout
        subsequence_length  :   int
                                Length of the subsequences (all embeddings are padded to this
                                length)
        optimizer   :   OptimizerSpec
                        Specification used to create the optimizer; also supplies the
                        learning rate and the step counter
        '''
        ############################################################################################
        #                                 Get all hyperparameters                                  #
        ############################################################################################
        vocab_size = kwargs['vocab_size']
        embedding_size = kwargs['embedding_size']
        memory_size = kwargs['memory_size']
        keep_prob = kwargs['keep_prob']
        subsequence_length = kwargs['subsequence_length']
        optimizer_spec = kwargs['optimizer']
        optimizer = optimizer_spec.create()
        self.learning_rate = optimizer_spec.learning_rate
        self.step_counter = optimizer_spec.step_counter

        ############################################################################################
        #                                        Net inputs                                        #
        ############################################################################################
        self.batch_size = placeholder(tf.int32, shape=[], name='batch_size')
        self.is_training = placeholder(tf.bool, shape=[], name='is_training')
        self.word_ids = placeholder(tf.int32,
                                    shape=(None, subsequence_length),
                                    name='word_ids')
        self.labels = placeholder(tf.int32, shape=(None, ), name='labels')
        self.hidden_state = placeholder(tf.float32,
                                        shape=(None, memory_size),
                                        name='hidden_state')
        self.cell_state = placeholder(tf.float32,
                                      shape=(None, memory_size),
                                      name='cell_state')

        lengths = sequence_lengths(self.word_ids)

        ############################################################################################
        #                                        Embedding                                         #
        ############################################################################################
        self.embedding_matrix, _bias = get_weights_and_bias(
            (vocab_size, embedding_size))
        embeddings = cond(
            self.is_training,
            lambda: nn.dropout(nn.embedding_lookup(self.embedding_matrix, self.word_ids),
                               keep_prob=keep_prob),
            lambda: nn.embedding_lookup(self.embedding_matrix, self.word_ids))

        ############################################################################################
        #                                        LSTM layer                                        #
        ############################################################################################
        cell = BasicLSTMCell(memory_size, activation=tf.nn.tanh)

        # during inference, use entire ensemble
        keep_prob = cond(self.is_training, lambda: constant(keep_prob),
                         lambda: constant(1.0))
        cell = DropoutWrapper(cell, output_keep_prob=keep_prob)

        # what's the difference to just creating a zero-filled tensor tuple?
        self.zero_state = cell.zero_state(self.batch_size, tf.float32)
        # LSTMStateTuple takes the cell state as c and the hidden (output) state as h
        state = LSTMStateTuple(c=self.cell_state, h=self.hidden_state)

        # A dynamic rnn creates the graph on the fly, so it can deal with embeddings of different
        # lengths. We do not need to unstack the embedding tensor into rows; instead we compute
        # the actual sequence lengths and pass them in.
        # We are not sure how any of this works. Do we need to mask the cost function so the cell
        # outputs for _NOT_A_WORD_ inputs are ignored? Is the final cell state really relevant if it
        # was last updated with _NOT_A_WORD_ input? Does static_rnn absolve us of any of those
        # issues?
        outputs, self.state = nn.dynamic_rnn(cell,
                                             embeddings,
                                             sequence_length=lengths,
                                             initial_state=state)
        # Flatten the per-step outputs into one row per sequence
        outputs = reshape(concat(outputs, 1),
                          [-1, subsequence_length * memory_size])
        self.outputs = reduce_mean(outputs)

        ############################################################################################
        #                        Fully connected layer, loss, and training                         #
        ############################################################################################
        ff1 = fully_connected(outputs, 2, with_activation=False, use_bias=True)
        loss = reduce_mean(
            nn.sparse_softmax_cross_entropy_with_logits(labels=self.labels,
                                                        logits=ff1))
        self.train_step = optimizer.minimize(loss,
                                             global_step=self.step_counter)
        self.predictions = nn.softmax(ff1)
        correct_prediction = equal(cast(argmax(self.predictions, 1), tf.int32),
                                   self.labels)
        self.accuracy = reduce_mean(cast(correct_prediction, tf.float32))

        ############################################################################################
        #                                     Create summaries                                     #
        ############################################################################################
        with tf.variable_scope('summary'):
            self.summary_loss = tf.summary.scalar('loss', loss)
            self.summary_accuracy = tf.summary.scalar('accuracy',
                                                      self.accuracy)
    def _get_word_embed(self, h):
        word_logit = matmul(h, self.output_w) + self.bhid
        word_chosen = tf.argmax(word_logit, 1)
        return embedding_lookup(self.embeddings, word_chosen)