Example #1
def train(sess, mnist, n_training_epochs, batch_size, summaries_op,
          accuracy_summary_op, accuracy_placeholder, train_writer, test_writer,
          X, Y, train_op, loss_op, accuracy_op, preds_op):
    # compute number of batches for given batch_size
    num_train_batches = mnist.train.num_examples // batch_size

    # record starting time
    train_start = time.time()

    # Run through the entire dataset n_training_epochs times
    for i in range(n_training_epochs):
        # Initialise statistics
        training_loss = 0
        epoch_start = time.time()

        # Run the SGD train op for each minibatch
        for _ in range(num_train_batches):
            batch = mnist.train.next_batch(batch_size)
            trainstep_result, batch_loss, summary = \
                qfns.train_step(sess, batch, X, Y, train_op, loss_op, summaries_op)
            train_writer.add_summary(summary, i)
            training_loss += batch_loss

        # Timing and statistics
        epoch_duration = round(time.time() - epoch_start, 2)
        ave_train_loss = training_loss / num_train_batches

        # Get accuracy
        train_accuracy = \
            accuracy(sess, mnist.train, batch_size, X, Y, accuracy_op)
        test_accuracy = \
            accuracy(sess, mnist.test, batch_size, X, Y, accuracy_op)

        # log accuracy at the current epoch on training and test sets
        train_acc_summary = sess.run(
            accuracy_summary_op,
            feed_dict={accuracy_placeholder: train_accuracy})
        train_writer.add_summary(train_acc_summary, i)
        test_acc_summary = sess.run(
            accuracy_summary_op,
            feed_dict={accuracy_placeholder: test_accuracy})
        test_writer.add_summary(test_acc_summary, i)
        for writer in (train_writer, test_writer):
            writer.flush()

        train_duration = round(time.time() - train_start, 2)
        # Output to monitor training
        print('Epoch {0}, Training Loss: {1}, Test accuracy: {2}, \
time: {3}s, total time: {4}s'.format(i, ave_train_loss, test_accuracy,
                                     epoch_duration, train_duration))
    print('Total training time: {0}s'.format(train_duration))
    print('Confusion Matrix:')
    true_class = tf.argmax(Y, 1)
    predicted_class = tf.argmax(preds_op, 1)
    # tf.confusion_matrix expects labels first, then predictions
    cm = tf.confusion_matrix(true_class, predicted_class)
    print(sess.run(cm, feed_dict={X: mnist.test.images, Y: mnist.test.labels}))
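This example feeds precomputed accuracy values through `accuracy_placeholder` into a shared `accuracy_summary_op`. A minimal sketch of how that pair might be defined (the names, shape, and scalar-summary choice are assumptions, not taken from the example's repository):

import tensorflow as tf

# Scalar placeholder so a single summary op can log accuracy for both the
# training and test sets; the separate FileWriters keep the curves apart.
accuracy_placeholder = tf.placeholder(tf.float32, shape=(), name='accuracy_input')
accuracy_summary_op = tf.summary.scalar('accuracy', accuracy_placeholder)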
Example #2
def train(sess, mnist, n_training_epochs, batch_size,
          summaries_op, accuracy_summary_op, accuracy_placeholder,
          train_writer, test_writer,
          X, Y, train_op, loss_op, accuracy_op, preds_op):
    # compute number of batches for given batch_size
    num_train_batches = mnist.train.num_examples // batch_size

    # record starting time
    train_start = time.time()

    # Run through the entire dataset n_training_epochs times
    for i in range(n_training_epochs):
        # Initialise statistics
        training_loss = 0
        epoch_start = time.time()

        # Run the SGD train op for each minibatch
        for _ in range(num_train_batches):
            batch = mnist.train.next_batch(batch_size)
            trainstep_result, batch_loss, summary = \
                qfns.train_step(sess, batch, X, Y, train_op, loss_op, summaries_op)
            train_writer.add_summary(summary, i)
            training_loss += batch_loss

        # Timing and statistics
        epoch_duration = round(time.time() - epoch_start, 2)
        ave_train_loss = training_loss / num_train_batches

        # Get accuracy
        train_accuracy = \
            accuracy(sess, mnist.train, batch_size, X, Y, accuracy_op)
        test_accuracy = \
            accuracy(sess, mnist.test, batch_size, X, Y, accuracy_op)

        # log accuracy at the current epoch on training and test sets
        train_acc_summary = sess.run(accuracy_summary_op,
                               feed_dict={accuracy_placeholder: train_accuracy})
        train_writer.add_summary(train_acc_summary, i)
        test_acc_summary = sess.run(accuracy_summary_op,
                                feed_dict={accuracy_placeholder: test_accuracy})
        test_writer.add_summary(test_acc_summary, i)
        for writer in (train_writer, test_writer):
            writer.flush()

        train_duration = round(time.time() - train_start, 2)
        # Output to monitor training
        print('Epoch {0}, Training Loss: {1}, Test accuracy: {2}, \
time: {3}s, total time: {4}s'.format(i, ave_train_loss,
                                     test_accuracy, epoch_duration,
                                     train_duration))
    print('Total training time: {0}s'.format(train_duration))
    print('Confusion Matrix:')
    true_class = tf.argmax(Y, 1)
    predicted_class = tf.argmax(preds_op, 1)
    # tf.confusion_matrix expects labels first, then predictions
    cm = tf.confusion_matrix(true_class, predicted_class)
    print(sess.run(cm, feed_dict={X: mnist.test.images,
                                  Y: mnist.test.labels}))
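Both of the examples above also assume that `train_writer` and `test_writer` are TensorBoard `tf.summary.FileWriter` instances created before `train` is called. A minimal sketch, with assumed log directories:

import tensorflow as tf

with tf.Session() as sess:
    # Separate log directories so TensorBoard can overlay train/test curves.
    train_writer = tf.summary.FileWriter('./logs/train', sess.graph)
    test_writer = tf.summary.FileWriter('./logs/test')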
Example #3
def train(sess,
          mnist,
          n_training_epochs,
          batch_size,
          X,
          Y,
          train_op,
          loss_op,
          accuracy_op,
          accuracy_placeholder,
          summaries_op=None,
          accuracy_summary_op=None,
          train_writer=None,
          test_writer=None):
    # compute number of batches for given batch_size
    num_train_batches = mnist.train.num_examples // batch_size

    # record starting time
    train_start = time.time()

    # Run through the entire dataset n_training_epochs times
    for i in range(n_training_epochs):
        # Initialise statistics
        training_loss = 0
        epoch_start = time.time()

        # Run the SGD train op for each minibatch
        for _ in range(num_train_batches):
            batch = mnist.train.next_batch(batch_size)
            # trainstep_result, batch_loss, summary = \
            #     qfns.train_step(sess, batch, X, Y, train_op, loss_op, summaries_op)
            trainstep_result, batch_loss, _ = \
                qfns.train_step(sess, batch, X, Y, train_op, loss_op)
            # train_writer.add_summary(summary, i)
            training_loss += batch_loss

        # Timing and statistics
        epoch_duration = round(time.time() - epoch_start, 2)
        ave_train_loss = training_loss / num_train_batches

        # Get accuracy
        train_accuracy = \
            accuracy(sess, mnist.train, batch_size, X, Y, accuracy_op, mnist)
        test_accuracy = \
            accuracy(sess, mnist.test, batch_size, X, Y, accuracy_op, mnist)

        # log accuracy at the current epoch on training and test sets
        # train_acc_summary = sess.run(accuracy_summary_op,
        #                              feed_dict={accuracy_placeholder: train_accuracy})
        # train_writer.add_summary(train_acc_summary, i)
        # test_acc_summary = sess.run(accuracy_summary_op,
        #                             feed_dict={accuracy_placeholder: test_accuracy})
        # test_writer.add_summary(test_acc_summary, i)
        # [writer.flush() for writer in [train_writer, test_writer]]

        train_duration = round(time.time() - train_start, 2)

        # Output to monitor training
        print('Epoch {0}, Training Loss: {1}, Test accuracy: {2}, \
time: {3}s, total time: {4}s'.format(i, ave_train_loss,
                                     test_accuracy,
                                     epoch_duration,
                                     train_duration))
    print('Total training time: {0}s'.format(train_duration))
    return train_accuracy, test_accuracy
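Example #3 drops the summary plumbing and returns the final accuracies, so it only needs the graph tensors, the dataset, and the hyperparameters. A minimal, hypothetical driver sketch: the single-layer softmax model, learning rate, and epoch/batch settings below are assumptions, and the `qfns.train_step` and `accuracy` helpers from the example's own module must still be importable.

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

# Assumed single-layer softmax model; the original repository's model is unknown.
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])
logits = tf.layers.dense(X, 10)
loss_op = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=logits))
train_op = tf.train.GradientDescentOptimizer(0.5).minimize(loss_op)
correct = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))
accuracy_op = tf.reduce_mean(tf.cast(correct, tf.float32))
accuracy_placeholder = tf.placeholder(tf.float32, shape=())

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    train_acc, test_acc = train(sess, mnist, n_training_epochs=5, batch_size=100,
                                X=X, Y=Y, train_op=train_op, loss_op=loss_op,
                                accuracy_op=accuracy_op,
                                accuracy_placeholder=accuracy_placeholder)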