Example no. 1
import tensorflow as tf

# lstm, embeddings, inputs, labels, lengths, and the dimension variables are
# defined earlier in the original file; Layer and Training are project helpers.
output_layer = Layer.W(2*hiddendim, labelspace, 'Output')
output_bias  = Layer.b(labelspace, 'OutputBias')

outputs, fstate = tf.nn.dynamic_rnn(lstm, embeddings.lookup(inputs),
                                    sequence_length=lengths,
                                    dtype=tf.float32)
# fstate is assumed to be the concatenated [c; h] final LSTM state (a cell built
# with state_is_tuple=False), which matches the 2*hiddendim rows of output_layer.
logits = tf.matmul(fstate, output_layer) + output_bias
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))

## Learning ##
## Optimizer: Adam (tf.train.RMSPropOptimizer is a drop-in alternative)
optimizer = tf.train.AdamOptimizer()
## Gradient clipping (an alternative to minimize(); expanded sketch after this example):
##tvars = tf.trainable_variables()
##grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), 5.0)
##train_op = optimizer.apply_gradients(zip(grads, tvars))
train_op = optimizer.minimize(loss)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))

# Add this to session to see cpu/gpu placement: 
# config=tf.ConfigProto(log_device_placement=True)
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())

  # Model params
  Trainer = Training(sess, correct_prediction, train_op, loss, 
                     inputs, labels, lengths, batch_size)
  # Run training
  Trainer.train(training, training_labels, development, development_labels, 
                generate_batch, train_lens, dev_lens)
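
The commented-out lines above sketch gradient clipping. A minimal standalone
version of that variant, assuming the same loss and a TF 1.x graph (5.0 is the
clip norm from the comments):

# Compute gradients explicitly, clip their global norm to 5.0, then apply them;
# together these three ops replace the single optimizer.minimize(loss) call.
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), 5.0)
train_op = optimizer.apply_gradients(zip(grads, tvars))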
Example no. 2
import tensorflow as tf

# Layer, Embedding, and Training are project helpers; the hyperparameters
# (vocabsize, batch_size, maxlength, inputdim, ...) are set earlier in the file.
embeddings = Embedding(vocabsize, one_hot=onehot, embedding_size=embeddingdim)
# Input data.
dataset = tf.placeholder(tf.int32, shape=[batch_size, maxlength], name='Train')
labels = tf.placeholder(tf.float32,
                        shape=[batch_size, labelspace],
                        name='Label')
# Model
hidden_layer = Layer.W(inputdim, hiddendim, 'Hidden')
hidden_bias = Layer.b(hiddendim, 'HiddenBias')
# Prediction
output_layer = Layer.W(hiddendim, labelspace, 'Output')
output_bias = Layer.b(labelspace, 'OutputBias')

embedded = tf.reshape(embeddings.lookup(dataset), [batch_size, inputdim])
forward = tf.nn.relu(tf.matmul(embedded, hidden_layer) + hidden_bias)
dropout = tf.nn.dropout(forward, 0.5)  # keep_prob=0.5; applied at eval time too in this snippet
logits = tf.matmul(dropout, output_layer) + output_bias

loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
train_op = tf.train.AdamOptimizer().minimize(loss)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Model params
    Trainer = Training(sess, correct_prediction, train_op, loss, dataset,
                       labels)
    # Run training
    Trainer.train(training, training_labels, development, development_labels,
                  generate_batch)
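
The dropout above fires unconditionally, including during evaluation. A minimal
sketch of one common fix, assuming the same graph: feed the keep probability
through a placeholder that defaults to 1.0 (no dropout) and override it only
when training.

# keep_prob defaults to 1.0 (no dropout) unless explicitly fed.
keep_prob = tf.placeholder_with_default(1.0, shape=[], name='KeepProb')
dropout = tf.nn.dropout(forward, keep_prob)

# Training step: enable dropout by feeding keep_prob.
# sess.run(train_op, feed_dict={dataset: batch, labels: batch_labels, keep_prob: 0.5})
# Evaluation: omit keep_prob from feed_dict to get the 1.0 default.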
Example no. 3
        return jsonify(output)
    else:
        return """<html><body>
    GET not implemented
    </body></html>"""


saver = tf.train.Saver()
# Add this to session to see cpu/gpu placement:
# config=tf.ConfigProto(log_device_placement=True)
sess = tf.Session()
sess.run(tf.global_variables_initializer())

if DoTrain:
    print "Train Source"
    Trainer = Training(sess, correct_prediction[0], logits[0], train_op[0],
                       loss[0], inputs, labels[0], lengths, batch_size, 7)  #6
    Trainer.train(training[0], training_labels[0], testing[0],
                  testing_labels[0], generate_batch, training_lens[0],
                  testing_lens[0])

    print "Train Reference"
    Trainer = Training(sess, correct_prediction[1], logits[1], train_op[1],
                       loss[1], inputs, labels[1], lengths, batch_size, 5)  #2
    Trainer.train(training[1], training_labels[1], testing[1],
                  testing_labels[1], generate_batch, training_lens[1],
                  testing_lens[1])

    print "Train Direction"
    Trainer = Training(sess, correct_prediction[2], logits[2], train_op[2],
                       loss[2], inputs, labels[2], lengths, batch_size, 2)  #2
    Trainer.train(training[2], training_labels[2], testing[2],
                  testing_labels[2], generate_batch, training_lens[2],
                  testing_lens[2])
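
The example builds a tf.train.Saver but never calls it. A minimal sketch of the
usual save/restore pattern, with './model.ckpt' as a hypothetical path:

# Persist all trained variables to a checkpoint (path is illustrative).
save_path = saver.save(sess, './model.ckpt')
# Later, after rebuilding the same graph, restore the weights:
# saver.restore(sess, save_path)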
Example no. 4
    #time.sleep(1)  # delays for 1 second
    return jsonify(output)
  else:
    return """<html><body>
    GET not implemented
    </body></html>"""

saver = tf.train.Saver()
# Add this to session to see cpu/gpu placement: 
# config=tf.ConfigProto(log_device_placement=True)
sess = tf.Session()
sess.run(tf.global_variables_initializer())

if DoTrain:
  print "Train Source"
  Trainer = Training(sess, correct_prediction[0], logits[0], train_op[0], loss[0],
                     inputs, labels[0], lengths, batch_size, 7) #6
  Trainer.train(training[0], training_labels[0], testing[0], 
                testing_labels[0], generate_batch, training_lens[0],
                testing_lens[0])

  print "Train Reference"
  Trainer = Training(sess, correct_prediction[1], logits[1], train_op[1], loss[1],
                     inputs, labels[1], lengths, batch_size, 5) #2
  Trainer.train(training[1], training_labels[1], testing[1], 
                testing_labels[1], generate_batch, training_lens[1],
                testing_lens[1])

  print "Train Direction"
  Trainer = Training(sess, correct_prediction[2], logits[2], train_op[2], loss[2],
                     inputs, labels[2], lengths, batch_size, 2) #2
  Trainer.train(training[2], training_labels[2], testing[2],
                testing_labels[2], generate_batch, training_lens[2],
                testing_lens[2])
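
The comment above names the device-placement flag without wiring it in. A
minimal sketch of a session constructed that way:

# Log which ops are placed on CPU vs. GPU when the graph runs.
config = tf.ConfigProto(log_device_placement=True)
sess = tf.Session(config=config)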
Example no. 5
import tensorflow as tf

# maxlength, vocabsize, embeddingdim, and the remaining hyperparameters are set
# earlier in the original file; Layer, Embedding, and Training are project helpers.
onehot = True
inputdim = maxlength*vocabsize if onehot else maxlength*embeddingdim

# Define embeddings matrix
embeddings = Embedding(vocabsize, one_hot=onehot, embedding_size=embeddingdim)
# Input data.
dataset = tf.placeholder(tf.int32, shape=[batch_size, maxlength], name='Train')
labels = tf.placeholder(tf.float32, shape=[batch_size, labelspace], name='Label')
# Model
hidden_layer = Layer.W(inputdim, hiddendim, 'Hidden')
hidden_bias  = Layer.b(hiddendim, 'HiddenBias')
# Prediction
output_layer = Layer.W(hiddendim, labelspace, 'Output')
output_bias  = Layer.b(labelspace, 'OutputBias')

embedded = tf.reshape(embeddings.lookup(dataset), [batch_size, inputdim])
forward = tf.nn.relu(tf.matmul(embedded, hidden_layer) + hidden_bias)
dropout = tf.nn.dropout(forward, 0.5)  # keep_prob=0.5; applied at eval time too in this snippet
logits = tf.matmul(dropout, output_layer) + output_bias

loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
train_op = tf.train.AdamOptimizer().minimize(loss)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  # Model params
  Trainer = Training(sess, correct_prediction, train_op, loss, dataset, labels)
  # Run training
  Trainer.train(training, training_labels, development, development_labels, generate_batch)
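
correct_prediction above is a per-example boolean vector; the usual next step,
not shown in the snippet, is to reduce it to a scalar accuracy op:

# Fraction of the batch whose arg-max prediction matches the one-hot label.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# acc = sess.run(accuracy, feed_dict={dataset: batch, labels: batch_labels})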