Example #1
batch_size = 128
hiddendim = 100
embeddingdim = 100
graph = tf.Graph()
onehot = True
inputdim = maxlength * vocabsize if onehot else maxlength * embeddingdim

# Define embeddings matrix
embeddings = Embedding(vocabsize, one_hot=onehot, embedding_size=embeddingdim)
# Input data.
dataset = tf.placeholder(tf.int32, shape=[batch_size, maxlength], name='Train')
labels = tf.placeholder(tf.float32,
                        shape=[batch_size, labelspace],
                        name='Label')
# Model
hidden_layer = Layer.W(inputdim, hiddendim, 'Hidden')
hidden_bias = Layer.b(hiddendim, 'HiddenBias')
# Prediction
output_layer = Layer.W(hiddendim, labelspace, 'Output')
output_bias = Layer.b(labelspace, 'OutputBias')

embedded = tf.reshape(embeddings.lookup(dataset), [batch_size, inputdim])
forward = tf.nn.relu(tf.matmul(embedded, hidden_layer) + hidden_bias)
dropout = tf.nn.dropout(forward, 0.5)
logits = tf.matmul(dropout, output_layer) + output_bias

loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, labels))
train_op = tf.train.AdamOptimizer().minimize(loss)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
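    # Not part of the original example: a minimal training-loop sketch; the
    # num_steps constant and next_batch() helper are hypothetical stand-ins for
    # whatever supplies integer token ids and one-hot labels of the shapes above.
    for step in range(num_steps):
        batch_data, batch_labels = next_batch()
        _, batch_loss = sess.run([train_op, loss],
                                 feed_dict={dataset: batch_data,
                                            labels: batch_labels})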
Example #2
embeddings = Embedding(vocabsize, one_hot=onehot, embedding_size=embeddingdim)

# Input -> LSTM -> Outstate
inputs = tf.placeholder(tf.int32, [batch_size, maxlength])
labels = tf.placeholder(tf.float32, [batch_size, labelspace])
lengths = tf.placeholder(tf.int32, [batch_size])


# RNN
lstm = tf.nn.rnn_cell.LSTMCell(hiddendim,
                               initializer=tf.contrib.layers.xavier_initializer(seed=20160501))
lstm = tf.nn.rnn_cell.DropoutWrapper(lstm, output_keep_prob=dropout)

# Prediction
# 2*hiddendim because, under the old state_is_tuple=False default, fstate below
# is the LSTM's c and h states concatenated into one [batch_size, 2*hiddendim] tensor.
output_layer = Layer.W(2*hiddendim, labelspace, 'Output')
output_bias  = Layer.b(labelspace, 'OutputBias')

outputs, fstate = tf.nn.dynamic_rnn(lstm, embeddings.lookup(inputs), 
                                    sequence_length=lengths, 
                                    dtype=tf.float32)
logits = tf.matmul(fstate, output_layer) + output_bias
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, labels))

## Learning ##
## Optimizer: Adam (alternative: tf.train.RMSPropOptimizer)
optimizer = tf.train.AdamOptimizer()
## Gradient Clipping:
##tvars = tf.trainable_variables()
##grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), 5.0)
##train_op = optimizer.apply_gradients(zip(grads, tvars))
train_op = optimizer.minimize(loss)
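# Not part of the original example: prediction and a simple accuracy node that
# could follow the training op, mirroring the feed-forward example above.
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))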
Example #3
lstm = {}
lstm[0] = tf.nn.rnn_cell.LSTMCell(hiddendim,
                               initializer=tf.contrib.layers.xavier_initializer(seed=20160501))
lstm[1] = tf.nn.rnn_cell.LSTMCell(hiddendim,
                               initializer=tf.contrib.layers.xavier_initializer(seed=20160501))
lstm[2] = tf.nn.rnn_cell.LSTMCell(hiddendim,
                               initializer=tf.contrib.layers.xavier_initializer(seed=20160501))
lstm[0] = tf.nn.rnn_cell.DropoutWrapper(lstm[0], output_keep_prob=dropout)
lstm[1] = tf.nn.rnn_cell.DropoutWrapper(lstm[1], output_keep_prob=dropout)
lstm[2] = tf.nn.rnn_cell.DropoutWrapper(lstm[2], output_keep_prob=dropout)
lstm[0] = tf.nn.rnn_cell.MultiRNNCell(cells=[lstm[0]] * multicells, state_is_tuple=True)
lstm[1] = tf.nn.rnn_cell.MultiRNNCell(cells=[lstm[1]] * multicells, state_is_tuple=True)
lstm[2] = tf.nn.rnn_cell.MultiRNNCell(cells=[lstm[2]] * multicells, state_is_tuple=True)


# Prediction
output_layer = {}
output_layer[0] = Layer.W(multicells*hiddendim, 20, 'Output-Sou')
output_layer[1] = Layer.W(multicells*hiddendim, 20, 'Output-Ref')
output_layer[2] = Layer.W(multicells*hiddendim, 9, 'Output-Dir')
output_bias = {}
output_bias[0]  = Layer.b(20, 'OutputBias-Sou')
output_bias[1]  = Layer.b(20, 'OutputBias-Ref')
output_bias[2]  = Layer.b(9, 'OutputBias-Dir')

outputs = {}
fstate = {}
with tf.variable_scope("lstm0"):
  outputs[0], fstate[0] = tf.nn.dynamic_rnn(lstm[0], embeddings[0].lookup(inputs),
                                      sequence_length=lengths, 
                                      dtype=tf.float32)
with tf.variable_scope("lstm1"):
  outputs[1], fstate[1] = tf.nn.dynamic_rnn(lstm[1], embeddings[1].lookup(inputs),
                                      sequence_length=lengths, 
                                      dtype=tf.float32)
with tf.variable_scope("lstm2"):
  outputs[2], fstate[2] = tf.nn.dynamic_rnn(lstm[2], embeddings[2].lookup(inputs),
                                      sequence_length=lengths,
                                      dtype=tf.float32)
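# Not part of the original example: a hypothetical sketch of the three
# classification heads (Sou / Ref / Dir). Each MultiRNNCell final state is a
# tuple of `multicells` LSTMStateTuples, so concatenating their hidden states
# gives the multicells*hiddendim width the output layers expect. The label
# placeholders and joint loss are assumptions (TF 1.x tf.concat argument order).
logits = {}
labels = {}
num_classes = [20, 20, 9]
for i in range(3):
    final = tf.concat([layer.h for layer in fstate[i]], 1)
    logits[i] = tf.matmul(final, output_layer[i]) + output_bias[i]
    labels[i] = tf.placeholder(tf.float32, [batch_size, num_classes[i]])
loss = tf.add_n([tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=logits[i], labels=labels[i]))
    for i in range(3)])
train_op = tf.train.AdamOptimizer().minimize(loss)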
Example #4
inputs = tf.placeholder(tf.int32, [batch_size, maxlength], name="Utterance")
lengths = tf.placeholder(tf.int32, [batch_size], name="Lengths")

final_size = rep_dim - 5*2
## Weights & convolutions
W = {
  'cl1': Layer.convW([3, 3, 20, filters]),
  'cl2': Layer.convW([3, 3, filters, filters]),
  'cl3': Layer.convW([3, 3, filters, filters]),
  'cl4': Layer.convW([3, 3, filters, filters]),
  'cl5': Layer.convW([3, 3, filters, filters]),
  'out': Layer.W(final_size*final_size*filters + 2*hiddendim, rep_dim*rep_dim)
}

B = {
  'cb1': Layer.b(filters, init='Normal'),
  'cb2': Layer.b(filters, init='Normal'),
  'cb3': Layer.b(filters, init='Normal'),
  'cb4': Layer.b(filters, init='Normal'),
  'cb5': Layer.b(filters, init='Normal'),
  'out': Layer.b(rep_dim*rep_dim)
}

# Define embeddings matrix
embeddings = Embedding(vocabsize, one_hot=False, embedding_size=hiddendim)

# RNN
dropout = 0.75  # keep probability for the DropoutWrapper below
lstm = tf.nn.rnn_cell.LSTMCell(hiddendim,
               initializer=tf.contrib.layers.xavier_initializer(seed=20160501))
lstm = tf.nn.rnn_cell.DropoutWrapper(lstm, output_keep_prob=dropout)
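# Not part of the original example: a hypothetical sketch of how these pieces
# could be wired together (TF 1.x conventions assumed: tuple LSTM state and
# tf.concat(values, axis)). Five 3x3 'VALID' convolutions are implied by
# final_size = rep_dim - 5*2, and the 'out' layer's input width suggests the
# flattened feature map is concatenated with the LSTM's 2*hiddendim final
# state; the `world` placeholder is an assumption, not part of the snippet.
world = tf.placeholder(tf.float32, [batch_size, rep_dim, rep_dim, 20], name="World")
conv = world
for i in range(1, 6):
    conv = tf.nn.conv2d(conv, W['cl%d' % i], strides=[1, 1, 1, 1], padding='VALID')
    conv = tf.nn.relu(conv + B['cb%d' % i])
conv_flat = tf.reshape(conv, [batch_size, final_size * final_size * filters])

outputs, fstate = tf.nn.dynamic_rnn(lstm, embeddings.lookup(inputs),
                                    sequence_length=lengths, dtype=tf.float32)
joint = tf.concat([conv_flat, tf.concat([fstate.c, fstate.h], 1)], 1)
prediction = tf.matmul(joint, W['out']) + B['out']   # [batch_size, rep_dim*rep_dim]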
Example #5
lstm[1] = tf.contrib.rnn.DropoutWrapper(lstm[1], output_keep_prob=dropout)
lstm[2] = tf.contrib.rnn.DropoutWrapper(lstm[2], output_keep_prob=dropout)
lstm[0] = tf.contrib.rnn.MultiRNNCell(cells=[lstm[0]] * multicells,
                                      state_is_tuple=True)
lstm[1] = tf.contrib.rnn.MultiRNNCell(cells=[lstm[1]] * multicells,
                                      state_is_tuple=True)
lstm[2] = tf.contrib.rnn.MultiRNNCell(cells=[lstm[2]] * multicells,
                                      state_is_tuple=True)

# Prediction
output_layer = {}
output_layer[0] = Layer.W(multicells * hiddendim, 20, 'Output-Sou')
output_layer[1] = Layer.W(multicells * hiddendim, 20, 'Output-Ref')
output_layer[2] = Layer.W(multicells * hiddendim, 9, 'Output-Dir')
output_bias = {}
output_bias[0] = Layer.b(20, 'OutputBias-Sou')
output_bias[1] = Layer.b(20, 'OutputBias-Ref')
output_bias[2] = Layer.b(9, 'OutputBias-Dir')

outputs = {}
fstate = {}
with tf.variable_scope("lstm0"):
    outputs[0], fstate[0] = tf.nn.dynamic_rnn(lstm[0],
                                              embeddings[0].lookup(inputs),
                                              sequence_length=lengths,
                                              dtype=tf.float32)
with tf.variable_scope("lstm1"):
    outputs[1], fstate[1] = tf.nn.dynamic_rnn(lstm[1],
                                              embeddings[1].lookup(inputs),
                                              sequence_length=lengths,
                                              dtype=tf.float32)
Example #7
W = {
    'cl1':
    Layer.convW([3, 3, 20, filters]),
    'cl2':
    Layer.convW([3, 3, filters, filters]),
    'cl3':
    Layer.convW([3, 3, filters, filters]),
    'cl4':
    Layer.convW([3, 3, filters, filters]),
    'cl5':
    Layer.convW([3, 3, filters, filters]),
    'out':
    Layer.W(final_size * final_size * filters + 2 * hiddendim,
            rep_dim * rep_dim)
}

B = {
    'cb1': Layer.b(filters, init='Normal'),
    'cb2': Layer.b(filters, init='Normal'),
    'cb3': Layer.b(filters, init='Normal'),
    'cb4': Layer.b(filters, init='Normal'),
    'cb5': Layer.b(filters, init='Normal'),
    'out': Layer.b(rep_dim * rep_dim)
}

# Define embeddings matrix
embeddings = Embedding(vocabsize, one_hot=False, embedding_size=hiddendim)

# RNN
dropout = 0.75
lstm = tf.nn.rnn_cell.LSTMCell(
    hiddendim, initializer=tf.contrib.layers.xavier_initializer(seed=20160501))
lstm = tf.nn.rnn_cell.DropoutWrapper(lstm, output_keep_prob=dropout)