Example #1
0
def train(n_vocab, labels, embedding, embed, int_to_vocab):
    """Run the word2vec training loop in a TF1 session.

    Logs the mean batch loss every ``FLAGS.log_every`` iterations, and every
    ``FLAGS.evaluate_every`` iterations checkpoints the model and prints the
    top-10 nearest words for a fixed validation sample.

    Args:
        n_vocab: vocabulary size, forwarded to the loss builder.
        labels: target-word tensor, forwarded to the loss builder.
        embedding: full embedding matrix used for similarity evaluation.
        embed: looked-up embedding tensor for the current batch.
        int_to_vocab: id -> word mapping used to render evaluation output.

    Side effects: writes checkpoints and the loss history under 'checkpoint/'.
    """
    loss_op, train_op = get_loss_and_training_op(n_vocab, labels, embed)
    valid_words = sample_eval_data()
    # Build the evaluation op ONCE, before entering the loop. Calling
    # get_predictions() inside the loop (as before) adds fresh ops to the
    # graph on every evaluation step, steadily bloating and slowing it.
    pred_op = get_predictions(valid_words, embedding)
    with tf.Session() as sess:
        saver = tf.train.Saver()
        all_losses = []
        batch_loss = []  # losses since the last log line; reset each log
        sess.run(tf.global_variables_initializer())
        start = time.time()
        for i in range(FLAGS.total_iterations):
            loss, _ = sess.run([loss_op, train_op])
            all_losses.append(loss)
            batch_loss.append(loss)
            if i % FLAGS.log_every == 0:
                end = time.time()
                print(
                    'Iteration {}/{} '.format(i, FLAGS.total_iterations),
                    'Average Loss: {:.4f}'.format(np.mean(batch_loss)),
                    '{:.4f} sec/{} iterations'.format((end - start),
                                                      FLAGS.log_every))
                batch_loss = []
                start = time.time()

            if i % FLAGS.evaluate_every == 0:
                # NOTE(review): assumes the 'checkpoint/' directory exists —
                # tf.train.Saver does not create it; confirm with the caller.
                saver.save(sess, 'checkpoint/model-{}.ckpt'.format(i))
                predictions = sess.run(pred_op)
                words = get_top_10_words(predictions, int_to_vocab)
        saver.save(sess, 'checkpoint/model.ckpt')
        np.savez('checkpoint/all_losses.npz', all_losses)
Example #2
0
def predict(valid_words, estimator, int_to_vocab, vocab_to_int):
    """Return the top-10 similar words for *valid_words* via an Estimator.

    Args:
        valid_words: validation word ids fed to the eval input function.
        estimator: a tf.estimator.Estimator producing dicts with a
            'similarity' key.
        int_to_vocab: id -> word mapping used to render the output.
        vocab_to_int: unused here; kept for signature compatibility.

    Returns:
        The top-10 word list for the LAST prediction yielded, or None when
        the estimator yields no predictions.
    """
    # Fix: initialize so an empty prediction generator returns None instead
    # of raising UnboundLocalError on the final `return words`.
    words = None
    pred_generator = estimator.predict(
        input_fn=lambda: get_eval_dataset(valid_words))
    for predictions in pred_generator:
        # NOTE(review): each iteration overwrites `words`, so only the last
        # prediction's result is returned — confirm this is intentional.
        words = get_top_10_words(np.expand_dims(predictions['similarity'], 0),
                                 int_to_vocab)
    return words
Example #3
0
def predict(valid_words, embedding, int_to_vocab):
    """Restore the trained model and return top-10 similar words.

    Args:
        valid_words: validation word ids passed to the prediction op builder.
        embedding: embedding matrix variable restored from the checkpoint.
        int_to_vocab: id -> word mapping used to render the output.

    Returns:
        Whatever get_top_10_words() returns for the restored model's
        predictions (presumably a list of word strings — confirm against
        that helper).
    """
    pred_op = get_predictions(valid_words, embedding)
    with tf.Session() as sess:
        saver = tf.train.Saver()
        # Initialize first so any variables not present in the checkpoint
        # still get values; restore() then overwrites the saved ones.
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, 'checkpoint/model.ckpt')
        predictions = sess.run(pred_op)
        words = get_top_10_words(predictions, int_to_vocab)
    return words