Example 1
    # Assumes TF 1.x with `import tensorflow as tf` and the cell/RNN modules
    # bound as rnn_cell / rnn (pre-1.0: from tensorflow.python.ops import rnn, rnn_cell).
    def compute_states(self, emb):
        def unpack_sequence(tensor):
            # Split the batch-major embeddings [batch, time, dim] into a list
            # of `time` tensors of shape [batch, dim], as the static
            # bidirectional_rnn API expects (tf.unpack in pre-1.0 TF).
            return tf.unstack(tf.transpose(tensor, perm=[1, 0, 2]))

        # Xavier-initialized, L2-regularized weights for the composition layer.
        with tf.variable_scope("Composition",
                               initializer=tf.contrib.layers.xavier_initializer(),
                               regularizer=tf.contrib.layers.l2_regularizer(self.reg)):
            # Forward and backward LSTM cells, unrolled over the sequence.
            cell_fw = rnn_cell.LSTMCell(self.hidden_dim)
            cell_bw = rnn_cell.LSTMCell(self.hidden_dim)
            # Apply dropout to both the inputs and outputs of each cell;
            # self.dropout is a keep probability (1.0 disables dropout).
            cell_fw = rnn_cell.DropoutWrapper(
                cell_fw, input_keep_prob=self.dropout, output_keep_prob=self.dropout)
            cell_bw = rnn_cell.DropoutWrapper(
                cell_bw, input_keep_prob=self.dropout, output_keep_prob=self.dropout)

            # Unroll the static bidirectional RNN; `outputs` is a list of
            # per-timestep tensors with the fw and bw outputs concatenated.
            outputs, _, _ = rnn.bidirectional_rnn(
                cell_fw, cell_bw, unpack_sequence(emb),
                sequence_length=self.lngths, dtype=tf.float32)
        # Mean-pool over time: sum the per-timestep outputs (zeroed past each
        # sequence's true length) and divide by the true lengths.
        sum_out = tf.reduce_sum(tf.stack(outputs), [0])
        sent_rep = tf.div(sum_out, tf.expand_dims(tf.to_float(self.lngths), 1))
        return sent_rep
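
For comparison, the same mean-pooled representation can be built without the unpack helper by using the dynamic bidirectional API. A minimal sketch, assuming TF 1.x; the placeholder shapes (batch 8, 20 steps, 50-dim embeddings) and the hidden size are illustrative stand-ins for the fields on `self` above:

import tensorflow as tf

emb = tf.placeholder(tf.float32, [8, 20, 50])   # stands in for `emb`
lngths = tf.placeholder(tf.int32, [8])          # stands in for `self.lngths`

cell_fw = tf.contrib.rnn.LSTMCell(64)
cell_bw = tf.contrib.rnn.LSTMCell(64)

# bidirectional_dynamic_rnn consumes the batch-major tensor directly and
# returns a (forward, backward) pair of [batch, time, hidden] outputs.
(out_fw, out_bw), _ = tf.nn.bidirectional_dynamic_rnn(
    cell_fw, cell_bw, emb, sequence_length=lngths, dtype=tf.float32)

# Concatenate directions, then mean-pool over the true sequence lengths
# (outputs past each length are zero, so padding does not affect the sum).
outs = tf.concat([out_fw, out_bw], 2)
sent_rep = tf.reduce_sum(outs, 1) / tf.expand_dims(tf.to_float(lngths), 1)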
Example 2
    def bidirectional_lstm_inference(x):
        # IMAGE_SIZE, RGB_CHANNEL_SIZE and LABEL_SIZE are assumed module-level
        # constants (e.g. 32, 3 and 10 for CIFAR-10-style inputs).
        RNN_HIDDEN_UNITS = 128

        # x was [BATCH_SIZE, 32, 32, 3]
        # x changes to [32, BATCH_SIZE, 32, 3]
        x = tf.transpose(x, [1, 0, 2, 3])
        # x changes to [32 * BATCH_SIZE, 32 * 3]
        x = tf.reshape(x, [-1, IMAGE_SIZE * RGB_CHANNEL_SIZE])
        # x changes to array of 32 * [BATCH_SIZE, 32 * 3]
        x = tf.split(axis=0, num_or_size_splits=IMAGE_SIZE, value=x)

        weights = tf.Variable(
            tf.random_normal([2 * RNN_HIDDEN_UNITS, LABEL_SIZE]))
        biases = tf.Variable(tf.random_normal([LABEL_SIZE]))

        # output size is 128, state size is (c=128, h=128)
        fw_lstm_cell = rnn.BasicLSTMCell(RNN_HIDDEN_UNITS, forget_bias=1.0)
        bw_lstm_cell = rnn.BasicLSTMCell(RNN_HIDDEN_UNITS, forget_bias=1.0)

        # outputs is a list of 32 tensors, each [BATCH_SIZE, 2 * 128]
        # (forward and backward outputs concatenated per timestep)
        outputs, _, _ = rnn.bidirectional_rnn(fw_lstm_cell,
                                              bw_lstm_cell,
                                              x,
                                              dtype=tf.float32)

        # outputs[-1] is [BATCH_SIZE, 2 * 128], matching the weight shape
        return tf.matmul(outputs[-1], weights) + biases
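
A minimal usage sketch, assuming TF 1.x; the constants and placeholder shapes below are illustrative (CIFAR-10-like values) and are not part of the original snippet:

import tensorflow as tf
from tensorflow.contrib import rnn  # provides the LSTM cells used above

IMAGE_SIZE = 32
RGB_CHANNEL_SIZE = 3
LABEL_SIZE = 10

images = tf.placeholder(tf.float32,
                        [None, IMAGE_SIZE, IMAGE_SIZE, RGB_CHANNEL_SIZE])
labels = tf.placeholder(tf.int64, [None])

logits = bidirectional_lstm_inference(images)  # [BATCH_SIZE, LABEL_SIZE]
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                   logits=logits))
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)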
Example 3
# Imports assumed by this snippet; in later TF 1.x releases
# rnn.bidirectional_rnn was renamed rnn.static_bidirectional_rnn.
import tensorflow as tf
from tensorflow.python.ops import embedding_ops, rnn, variable_scope


def embedding_encoder(encoder_inputs,
                      cell,
                      embedding,
                      num_symbols,
                      embedding_size,
                      bidirectional=False,
                      dtype=None,
                      weight_initializer=None,
                      scope=None):

    with variable_scope.variable_scope(scope or "embedding_encoder",
                                       dtype=dtype) as scope:
        dtype = scope.dtype
        # Encoder: create the embedding matrix if one was not passed in.
        # (Use `embedding is None`, not `not embedding`; truth-testing a
        # tf.Tensor raises a TypeError.)
        if embedding is None:
            embedding = variable_scope.get_variable(
                "embedding", [num_symbols, embedding_size],
                initializer=weight_initializer())
        emb_inp = [
            embedding_ops.embedding_lookup(embedding, i)
            for i in encoder_inputs
        ]
        if bidirectional:
            _, output_state_fw, output_state_bw = rnn.bidirectional_rnn(
                cell, cell, emb_inp, dtype=dtype)
            # Concatenate the forward and backward final states along the
            # feature axis (TF 1.x argument order: values, then axis).
            encoder_state = tf.concat([output_state_fw, output_state_bw], 1)
        else:
            _, encoder_state = rnn.static_rnn(cell, emb_inp, dtype=dtype)

        return encoder_state
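
A minimal usage sketch, assuming TF 1.x and a GRU cell, whose state is a single tensor so the tf.concat above works directly (an LSTMCell's tuple state would need each part handled separately); all shapes and names below are illustrative:

import tensorflow as tf
from tensorflow.contrib import rnn as contrib_rnn

NUM_STEPS = 10  # illustrative unroll length

# One int32 batch of token ids per timestep, as static RNNs expect.
encoder_inputs = [tf.placeholder(tf.int32, [None], name="enc%d" % t)
                  for t in range(NUM_STEPS)]

cell = contrib_rnn.GRUCell(128)
encoder_state = embedding_encoder(
    encoder_inputs,
    cell,
    embedding=None,              # let the function create the matrix
    num_symbols=10000,
    embedding_size=64,
    bidirectional=True,
    weight_initializer=tf.random_uniform_initializer)
# encoder_state is [BATCH_SIZE, 2 * 128] when bidirectional=True.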