Example #1
import numpy as np
import tensorflow as tf

# `linear`, `dense_to_one_hot`, and `FLAGS` come from the surrounding project;
# one plausible sketch of the helpers follows this example.
def train(train_set, test_set):
    input_dim = 2
    n_classes = 2

    # inference model
    with tf.name_scope('model'):
        i = tf.placeholder("float", name='input')
        o = tf.placeholder("int32", name='true_output')

        l1 = linear(
                input=i,
                input_size=input_dim,
                output_size=3,
                name='linear_1'
        )

        a1 = tf.nn.tanh(l1, name='tanh_activation')

        l2 = linear(
                input=a1,
                input_size=3,  # must match output_size of linear_1; l1 is a tensor and has no .input_size
                output_size=n_classes,
                name='linear_2'
        )

        p_o_i = tf.nn.softmax(l2, name="softmax_output")

    with tf.name_scope('loss'):
        one_hot_labels = dense_to_one_hot(o, n_classes)
        # Naive cross-entropy; tf.log(0) produces NaN, so clipping p_o_i would be safer in practice.
        loss = tf.reduce_mean(-one_hot_labels * tf.log(p_o_i), name='loss')
        tf.scalar_summary('loss', loss)

    with tf.name_scope('accuracy'):
        correct_prediction = tf.equal(tf.argmax(one_hot_labels, 1), tf.argmax(p_o_i, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
        tf.scalar_summary('accuracy', accuracy)

    with tf.Session() as sess:
        # Merge all the summaries and write them out to ./log
        merged = tf.merge_all_summaries()
        writer = tf.train.SummaryWriter('./log', sess.graph_def)
        saver = tf.train.Saver()

        # training
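        # Creating the optimizer before initialize_all_variables() matters here:
        # minimize() adds RMSProp slot variables that must also be initialized.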
        train_op = tf.train.RMSPropOptimizer(FLAGS.learning_rate, FLAGS.decay, name='trainer').minimize(loss)
        tf.initialize_all_variables().run()

        for epoch in range(FLAGS.max_epochs):
            sess.run(train_op, feed_dict={i: train_set['features'], o: train_set['targets']})

            if epoch % max(int(FLAGS.max_epochs / 100), 1) == 0:
                summary, lss, acc = sess.run([merged, loss, accuracy],
                                             feed_dict={i: test_set['features'], o: test_set['targets']})
                writer.add_summary(summary, epoch)
                print()
                print('Epoch: {epoch}'.format(epoch=epoch))
                print(' - accuracy = {acc}'.format(acc=acc))
                print(' - loss     = {lss}'.format(lss=lss))

        save_path = saver.save(sess, "model.ckpt")
        print()
        print("Model saved in file: %s" % save_path)
        print()

        print('Test features')
        print(test_set['features'])
        print('Test targets')
        print(test_set['targets'])
        print('Predictions')
        predictions = sess.run(p_o_i, feed_dict={i: test_set['features'], o: test_set['targets']})
        print(predictions)
        print('Argmax predictions')
        print(np.argmax(predictions, 1).reshape((-1, 1)))
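
Both examples call small project-level helpers that are not shown above. The sketch below is one plausible implementation of the `linear` and `dense_to_one_hot` helpers that Example #1 assumes, written against the same TensorFlow 0.x API (`tf.concat(1, ...)`, `tf.pack`) the examples themselves use; the initializers are my guesses, not necessarily the project's.

import tensorflow as tf

def linear(input, input_size, output_size, name='linear'):
    # Affine layer y = x W + b; W and b live under the variable scope `name`.
    with tf.variable_scope(name):
        W = tf.get_variable('W', [input_size, output_size],
                            initializer=tf.truncated_normal_initializer(stddev=0.1))
        b = tf.get_variable('b', [output_size],
                            initializer=tf.constant_initializer(0.0))
        return tf.matmul(input, W) + b

def dense_to_one_hot(labels, n_classes):
    # Classic TF 0.x one-hot: scatter 1.0 into a dense [batch, n_classes] matrix.
    labels = tf.reshape(labels, [-1])
    batch_size = tf.size(labels)
    indices = tf.expand_dims(tf.range(0, batch_size), 1)
    concated = tf.concat(1, [indices, tf.expand_dims(labels, 1)])
    return tf.sparse_to_dense(concated,
                              tf.pack([batch_size, n_classes]),
                              1.0, 0.0)

With these in place, plus a `FLAGS` object carrying `learning_rate`, `decay`, and `max_epochs`, Example #1 is a complete two-layer tanh classifier over 2-D features.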
Example #2
import numpy as np
import tensorflow as tf

# `embedding`, `linear`, `dense_to_one_hot`, `LSTMCell`, `rnn`, and `FLAGS` come
# from the surrounding project; `embedding` is sketched after this example.
def train(train_set, test_set, idx2word, word2idx):
    embedding_size = 5
    vocabulary_length = len(idx2word)
    sequence_size = train_set['features'].shape[1]
    lstm_size = 5

    # inference model
    with tf.name_scope('model'):
        i = tf.placeholder("int32", name='input')
        o = tf.placeholder("int32", name='true_output')

        with tf.variable_scope("batch_size"):
            # Batch size is read from the input tensor at run time,
            # so training and test batches may differ in size.
            batch_size = tf.shape(i)[0]

        e = embedding(
                input=i,
                length=vocabulary_length,
                size=embedding_size,
                name='embedding'
        )

        with tf.name_scope("RNNCell"):
            cell = LSTMCell(lstm_size, input_size=embedding_size)
            state = cell.zero_state(batch_size, tf.float32)

        outputs, states = rnn(
                cell=cell,
                inputs=[e[:, j, :] for j in range(sequence_size)],
                initial_state=state,
                name='RNN'
        )

        final_state = states[-1]
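        # With this TF 0.x LSTMCell the state concatenates c and h, so its width
        # is cell.state_size (2 * lstm_size), which the linear layer below consumes.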

        l = linear(
                input=final_state,
                input_size=cell.state_size,
                output_size=vocabulary_length,
                name='linear'
        )

        p_o_i = tf.nn.softmax(l, name="softmax_output")

    with tf.name_scope('loss'):
        one_hot_labels = dense_to_one_hot(o, vocabulary_length)
        loss = tf.reduce_mean(-one_hot_labels * tf.log(p_o_i), name='loss')
        tf.scalar_summary('loss', loss)

    with tf.name_scope('accuracy'):
        correct_prediction = tf.equal(tf.argmax(one_hot_labels, 1), tf.argmax(p_o_i, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
        tf.scalar_summary('accuracy', accuracy)

    with tf.Session() as sess:
        # Merge all the summaries and write them out to ./log
        merged = tf.merge_all_summaries()
        writer = tf.train.SummaryWriter('./log', sess.graph_def)
        saver = tf.train.Saver()

        # training
        train_op = tf.train.AdamOptimizer(FLAGS.learning_rate, name='trainer').minimize(loss)
        tf.initialize_all_variables().run()

        for epoch in range(FLAGS.max_epochs):
            sess.run(train_op, feed_dict={i: train_set['features'], o: train_set['targets']})

            if epoch % max(int(FLAGS.max_epochs / 100), 1) == 0:
                summary, lss, acc = sess.run([merged, loss, accuracy],
                                             feed_dict={i: test_set['features'], o: test_set['targets']})
                writer.add_summary(summary, epoch)
                print()
                print('Epoch: {epoch}'.format(epoch=epoch))
                print(' - accuracy = {acc}'.format(acc=acc))
                print(' - loss     = {lss}'.format(lss=lss))

        save_path = saver.save(sess, "model.ckpt")
        print()
        print("Model saved in file: %s" % save_path)
        print()

        print('Test features')
        print(test_set['features'])
        print('Test targets')
        print(test_set['targets'])
        # print('Predictions')
        predictions = sess.run(p_o_i, feed_dict={i: test_set['features'], o: test_set['targets']})
        # print(predictions)
        print('Argmax predictions')
        print(np.argmax(predictions, 1).reshape((-1, 1)))
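
Example #2 additionally assumes an `embedding` helper that turns integer token ids into dense vectors. A minimal sketch under the same TensorFlow 0.x assumptions (the uniform initializer range here is hypothetical):

import tensorflow as tf

def embedding(input, length, size, name='embedding'):
    # Trainable [length, size] lookup table; rows are selected
    # by the integer token ids in `input`.
    with tf.variable_scope(name):
        table = tf.get_variable('table', [length, size],
                                initializer=tf.random_uniform_initializer(-1.0, 1.0))
        return tf.nn.embedding_lookup(table, input)

The example then unrolls the LSTM over `sequence_size` embedded tokens and classifies the final state over the whole vocabulary.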