def predict(self, inputs_ph):
    piano_ph, orch_tm1_ph = inputs_ph

    #####################
    # GRUs for modelling the past orchestra
    # First layer
    gru_cells = [GRUCell(e, activation='relu') for e in self.n_hs]
    # Infer the batch size from the input instead of hard-coding it
    batch_size = tf.shape(piano_ph)[0]
    states = [tf.zeros([batch_size, e]) for e in self.n_hs]

    # We are forced to use tf.nn.dynamic_rnn to feed the output back as input
    # (https://stackoverflow.com/questions/42440565/how-to-feed-back-rnn-output-to-input-in-tensorflow);
    # just pass sequences of length 1 at train time.

    orch_t = []
    for t in range(self.temporal_order):
        piano_t = piano_ph[:, t, :]
        # TODO: implement teacher-forcing relaxation here
        orch_tm1 = orch_tm1_ph[:, t, :]
        x = tf.concat([piano_t, orch_tm1], axis=1)
        new_states = []
        for gru_cell, state in zip(gru_cells, states):
            x, state_out = gru_cell(x, state)
            new_states.append(state_out)
        states = new_states
        orch_t.append(x)

    # Stack the per-step outputs into [batch, time, units]
    orch_pred = tf.stack(orch_t, axis=1)
    return orch_pred, orch_pred
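The TODO above is the natural place for scheduled sampling. A minimal sketch, assuming a scalar placeholder `teacher_forcing_prob` built once outside the loop and a `prev_out` tensor caching the previous step's prediction; both names are hypothetical, not part of the original model:

# Hypothetical scheduled-sampling mix for the loop body above.
# `teacher_forcing_prob` and `prev_out` are assumed names.
use_truth = tf.random_uniform([]) < teacher_forcing_prob
orch_tm1 = tf.cond(use_truth,
                   lambda: orch_tm1_ph[:, t, :],  # ground truth (teacher forcing)
                   lambda: prev_out)              # model's own previous output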
Example #2
    def __new_cell(self):
        # Cell factory: one fresh, independently parameterized cell per call.
        if self.cell_type == 'lstm':
            return BasicLSTMCell(self.units)
        elif self.cell_type == 'rnn':
            return BasicRNNCell(self.units)
        else:
            return GRUCell(self.units)
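A usage sketch from inside the same class (so the name-mangled `__new_cell` is reachable), assuming a `num_layers` attribute the snippet does not show; calling the factory once per layer keeps each layer's weights independent:

from tensorflow.contrib.rnn import MultiRNNCell

# Inside another method of the same class; `self.num_layers` is assumed.
stacked = MultiRNNCell([self.__new_cell() for _ in range(self.num_layers)])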
Example #3
def GRULayer(input_tensor, num_layers=1):
    input_tensor = tf.reshape(
        input_tensor,
        shape=[-1, ConfigUtil.seq_length, ConfigUtil.hidden_size])

    def make_cell():
        # Build a fresh cell per layer: passing the same cell object to
        # MultiRNNCell would make every layer share one set of weights.
        cell = GRUCell(num_units=128, kernel_initializer=create_initializer())
        return DropoutWrapper(cell, output_keep_prob=1 - ConfigUtil.dropout_prob)

    cell = (MultiRNNCell([make_cell() for _ in range(num_layers)])
            if num_layers > 1 else make_cell())
    outputs, state = dynamic_rnn(cell, input_tensor, dtype=tf.float32)
    return outputs
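A call sketch, reusing the `ConfigUtil` fields the function already depends on; the placeholder name and shape are assumptions:

# Hypothetical input; GRULayer reshapes it to
# [batch, seq_length, hidden_size] internally.
features = tf.placeholder(
    tf.float32, [None, ConfigUtil.seq_length, ConfigUtil.hidden_size])
sequence_output = GRULayer(features, num_layers=2)  # [batch, seq_length, 128]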
Example #4
def cbhg(inputs, input_lengths, is_training, scope, K, projections):
  with tf.variable_scope(scope):
    with tf.variable_scope('conv_bank'):
      # Convolution bank: concatenate on the last axis to stack channels from all convolutions
      conv_outputs = tf.concat(
        [conv1d(inputs, k, 128, tf.nn.relu, is_training, 'conv1d_%d' % k) for k in range(1, K+1)],
        axis=-1
      )

    # Maxpooling:
    maxpool_output = tf.layers.max_pooling1d(
      conv_outputs,
      pool_size=2,
      strides=1,
      padding='same')

    # Two projection layers:
    proj1_output = conv1d(maxpool_output, 3, projections[0], tf.nn.relu, is_training, 'proj_1')
    proj2_output = conv1d(proj1_output, 3, projections[1], None, is_training, 'proj_2')

    # Residual connection:
    highway_input = proj2_output + inputs

    # Handle dimensionality mismatch:
    if highway_input.shape[2] != 128:
      highway_input = tf.layers.dense(highway_input, 128)

    # 4-layer HighwayNet:
    for i in range(4):
      highway_input = highwaynet(highway_input, 'highway_%d' % (i+1))
    rnn_input = highway_input

    # Bidirectional RNN
    outputs, states = tf.nn.bidirectional_dynamic_rnn(
      GRUCell(128),
      GRUCell(128),
      rnn_input,
      sequence_length=input_lengths,
      dtype=tf.float32)
    return tf.concat(outputs, axis=2)  # Concat forward and backward
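For context, the Tacotron-style encoder this snippet comes from typically calls `cbhg` with a bank of 16 filters and two 128-unit projections; a hedged call sketch (input names and parameter values assumed, not taken from this snippet):

# Hypothetical call; `prenet_out` is [batch, time, 128], `input_lengths` is [batch].
encoder_outputs = cbhg(prenet_out, input_lengths, is_training=True,
                       scope='encoder_cbhg', K=16, projections=[128, 128])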
Example #5
    def __init__(self,
                 n_mid,
                 embedding_dim,
                 hidden_size,
                 batch_size,
                 seq_len=256):
        super(Model_GRU4REC, self).__init__(n_mid,
                                            embedding_dim,
                                            hidden_size,
                                            batch_size,
                                            seq_len,
                                            flag="GRU4REC")
        with tf.name_scope('rnn_1'):
            self.sequence_length = self.mask_length
            rnn_outputs, final_state1 = tf.nn.dynamic_rnn(
                GRUCell(hidden_size),
                inputs=self.item_his_eb,
                sequence_length=self.sequence_length,
                dtype=tf.float32,
                scope="gru1")

        self.user_eb = final_state1
        self.build_sampled_softmax_loss(self.item_eb, self.user_eb)
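`self.mask_length` comes from the base class and is not shown here; a plausible definition, assuming a 0/1 padding mask `self.mask` of shape [batch, seq_len] (both names hypothetical):

# Hypothetical: derive per-sequence lengths from a 0/1 padding mask.
self.mask_length = tf.cast(tf.reduce_sum(self.mask, axis=1), tf.int32)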
Example #6
import tensorflow as tf
from tensorflow.contrib.rnn import GRUCell

def length(sequence):
    # A frame counts as "used" if any of its features is nonzero;
    # summing the per-frame flags yields each sequence's true length.
    used = tf.sign(tf.reduce_max(tf.abs(sequence), axis=2))
    length = tf.reduce_sum(used, axis=1)
    length = tf.cast(length, tf.int32)
    return length

max_length = 100
frame_size = 64
num_hidden = 200

sequence = tf.placeholder(tf.float32, [None, max_length, frame_size])
output, state = tf.nn.dynamic_rnn(
    GRUCell(num_hidden),
    sequence,
    dtype=tf.float32,
    sequence_length=length(sequence),
)
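To check what `length` computes: zero-padded frames drop out of the sum, so each sequence's count equals its unpadded length. A quick sanity check, assuming NumPy is available:

import numpy as np

# Two sequences padded with zeros; lengths should come out as [5, 3].
batch = np.zeros([2, max_length, frame_size], dtype=np.float32)
batch[0, :5] = 1.0
batch[1, :3] = 1.0
with tf.Session() as sess:
    print(sess.run(length(sequence), {sequence: batch}))  # [5 3]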
Example #7

x = tf.placeholder(dtype=tf.int32, shape=[None, None])
y = tf.placeholder(dtype=tf.int64, shape=[None])
sequence_length = tf.placeholder(dtype=tf.int32, shape=[None])
keep_prob = tf.placeholder(dtype=tf.float32)
num_units = 100
n_epoch = 100

with tf.variable_scope('embedding'):
    rnn_input = tf.contrib.layers.embed_sequence(x,
                                                 vocab_size=embed_ingred_size,
                                                 embed_dim=embed_size)

with tf.variable_scope('rnn'):
    # One freshly wrapped cell per layer (reusing a single cell object
    # across layers would tie their weights together).
    cells = [
        DropoutWrapper(GRUCell(num_units), output_keep_prob=keep_prob)
        for _ in range(num_layers)
    ]
    cell = MultiRNNCell(cells)

    outputs, states = tf.nn.dynamic_rnn(cell,
                                        rnn_input,
                                        dtype=tf.float32,
                                        sequence_length=sequence_length)
    # Note:
    # 'outputs' is a tensor of shape [batch_size, max_time, num_units]
    # 'states' is an N-tuple (one entry per layer) holding the final
    # state of each GRUCell

with tf.variable_scope('full_connected'):
    state = states[-1]
    # Completing the truncated call; `num_classes` is an assumed name for
    # the output width, which the original snippet cuts off.
    fc = tf.contrib.layers.fully_connected(state, num_classes)

Example #8
x = tf.placeholder(dtype=tf.int32, shape=[None, None])
y = tf.placeholder(dtype=tf.int64, shape=[None])
sequence_length = tf.placeholder(dtype=tf.int32, shape=[None])
keep_prob = tf.placeholder(dtype=tf.float32)
num_units = 100
n_epoch = 3000

with tf.variable_scope('embedding'):
    rnn_input = tf.contrib.layers.embed_sequence(x,
                                                 vocab_size=embed_ingred_size,
                                                 embed_dim=embed_size)

with tf.variable_scope('rnn'):
    with tf.variable_scope('forward'):
        fw_cells = [GRUCell(num_units) for _ in range(num_layers)]
        fw_cells = [
            DropoutWrapper(fw_cell, output_keep_prob=keep_prob)
            for fw_cell in fw_cells
        ]
        fw_cells = MultiRNNCell(fw_cells)

    with tf.variable_scope('backward'):
        bw_cells = [GRUCell(num_units) for _ in range(num_layers)]
        bw_cells = [
            DropoutWrapper(bw_cell, output_keep_prob=keep_prob)
            for bw_cell in bw_cells
        ]
        bw_cells = MultiRNNCell(bw_cells)

    # Completing the truncated call, mirroring the unidirectional version
    # in the previous example.
    outputs, states = bidirectional_dynamic_rnn(
        fw_cells,
        bw_cells,
        rnn_input,
        sequence_length=sequence_length,
        dtype=tf.float32)
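The returned `states` is a (forward, backward) pair of per-layer state tuples. A common follow-up, shown here as an assumption rather than part of the original snippet, is to fuse the two top-layer states:

# Concatenate the final forward and backward layer states
# into a single fixed-size representation.
final_state = tf.concat([states[0][-1], states[1][-1]], axis=1)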
Example #9
from random import randint
from tqdm import tqdm

end_labels = [randint(p_len // 2 + 1, p_len - 1) for _ in range(64)]

# use CNN
out_dim = 64
window_len = 10

with tf.Session() as sess:
    for i in tqdm(range(64)):
        p, q, start_label, end_label = [p_corpus[i]], \
            [q_corpus[i]], [start_labels[i]], [end_labels[i]]

        # q encodes
        q_emb = tf.nn.embedding_lookup(word_embed, q)

        with tf.variable_scope("Question_Encoder", reuse=tf.AUTO_REUSE):
            cell_fw = GRUCell(num_units=hidden_size)
            cell_bw = GRUCell(num_units=hidden_size)

            output, _ = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, q_emb, \
                sequence_length = [q_len], dtype = tf.float32)

            # concat the forward and backward encoded information
            q_encodes = tf.concat(output, 2)

        # do the same to get `p_encodes`
        # p encodes
        p_emb = tf.nn.embedding_lookup(word_embed, p)
        with tf.variable_scope("Paragraph_Encoder", reuse=tf.AUTO_REUSE):
            cell_fw = GRUCell(num_units=hidden_size)
            cell_bw = GRUCell(num_units=hidden_size)
            output, _ = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, p_emb, \
                sequence_length=[p_len], dtype=tf.float32)
            # Mirror the question branch to obtain the paragraph encoding
            p_encodes = tf.concat(output, 2)