Example #1
cifar10 = cifar10_utils.get_cifar10('./cifar10/cifar-10-batches-py')

print('done getting cifar10')

image_shape = cifar10.train.images.shape[1:4]
num_classes = cifar10.test.labels.shape[1]

# Construct linear convnet graph
x = tf.placeholder(tf.float32, shape=[None] + list(image_shape), name='x')
y = tf.placeholder(tf.int32, shape=(None, num_classes), name='y')
is_training = tf.placeholder(dtype=tf.bool, shape=(), name='isTraining')

model = ConvNet(is_training=is_training)

_ = model.inference(x)
#%%
with tf.Session() as sess:

    # Initialise all variables
    tf.initialize_all_variables().run(session=sess)

    checkpoint_dirs = ['checkpoints_0reg_lr1e4_sqrtinit']
    ckpt_file = 'epoch14000.ckpt'
    #subdir = './'
    subdir = 'checkpoints_new/'
    test_size = 1000

    saver = tf.train.Saver()

    for ckpt_path in checkpoint_dirs:
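        # (snippet truncated here in the original)

The snippet above is cut off at the loop header. A minimal hedged sketch of how such a restore loop typically continues, assuming each checkpoint lives at subdir + directory + '/' + ckpt_file (the path layout is a guess from the variables above, not something the original shows):

    for ckpt_path in checkpoint_dirs:
        # Restore the trained weights for this run; the layout
        # 'checkpoints_new/<dir>/epoch14000.ckpt' is an assumption.
        saver.restore(sess, subdir + ckpt_path + '/' + ckpt_file)
        # ... evaluate or extract features from the restored model here,
        # e.g. on cifar10.test.images[:test_size] ...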
Example #2
def train():
    """
    Performs training and evaluation of ConvNet model.
    
    First define your graph using class ConvNet and its methods. Then define
    necessary operations such as trainer (train_step in this case), savers
    and summarizers. Finally, initialize your model within a tf.Session and
    do the training.
    
    ---------------------------
    How to evaluate your model:
    ---------------------------
    Evaluation on the test set should be conducted over the full set, i.e.
    10k images, while it is fine to evaluate on a minibatch for the train set.
    
    ---------------------------------
    How often to evaluate your model:
    ---------------------------------
    - on training set every print_freq iterations
    - on test set every eval_freq iterations
    
    ------------------------
    Additional requirements:
    ------------------------
    You are also supposed to take snapshots of your model state (i.e. graph,
    weights, etc.) every checkpoint_freq iterations. For this, you should
    study TensorFlow's tf.train.Saver class. For more information, please
    check out:
    [https://www.tensorflow.org/versions/r0.11/how_tos/variables/index.html]
    """
    # Set the random seeds for reproducibility. DO NOT CHANGE.
    tf.set_random_seed(42)
    np.random.seed(42)

    ########################
    # PUT YOUR CODE HERE  #
    ########################

    # Cifar10 stuff
    cifar10, image_shape, num_classes = standard_cifar10_get(FLAGS)

    # Placeholder variables
    x = tf.placeholder(tf.float32, shape=[None] + list(image_shape), name='x')
    y = tf.placeholder(tf.float32, shape=(None, num_classes), name='y')
    is_training = tf.placeholder(dtype=tf.bool, shape=(), name='isTraining')

    # CNN model
    model = ConvNet(is_training=is_training,
                    dropout_rate=FLAGS.dropout_rate,
                    save_stuff=FLAGS.save_stuff,
                    fc_reg_str=FLAGS.fc_reg_str)

    # Get logits, loss, accuracy, train optimisation step
    logits = model.inference(x)
    accuracy = model.accuracy(logits, y)
    reg_loss = sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
    loss = model.loss(logits, y) + reg_loss
    tf.scalar_summary('loss_incl_reg', loss)
    train_op = train_step(loss)

    # Function for getting feed dicts
    def get_feed(c, train=True):
        if train:
            xd, yd = c.train.next_batch(FLAGS.batch_size)
            return {x: xd, y: yd, is_training: True}
        else:
            xd = c.test.images[:FLAGS.test_size]
            yd = c.test.labels[:FLAGS.test_size]
            return {x: xd, y: yd, is_training: False}

    # For saving checkpoints
    saver = tf.train.Saver()

    with tf.Session() as sess:

        # Initialise all variables
        tf.initialize_all_variables().run(session=sess)

        # Merge all the summaries
        merged = tf.merge_all_summaries()
        if FLAGS.save_stuff:
            train_writer = tf.train.SummaryWriter(FLAGS.log_dir + '/train',
                                                  sess.graph)
            test_writer = tf.train.SummaryWriter(FLAGS.log_dir + '/test')

        # Start training loops
        # Note: the loop variable counts training steps, not epochs.
        for epoch in range(0, FLAGS.max_steps):
            # Evaluate on the test set every 100 steps (hard-coded here
            # rather than using FLAGS.eval_freq).
            if epoch % 100 == 0:

                # Print accuracy and loss on test set
                summary, acc, loss_val = \
                    sess.run([merged, accuracy, loss], get_feed(cifar10, False))

                if FLAGS.save_stuff:
                    test_writer.add_summary(summary, epoch)

                print('\nEpoch', epoch, '\nTest accuracy:', acc,
                      '\nTest loss    :', loss_val)

            if epoch % FLAGS.checkpoint_freq == 0:
                # Save model checkpoint
                if epoch > 0:
                    save_path = saver.save(sess, FLAGS.checkpoint_dir + \
                                           '/epoch'+ str(epoch) + '.ckpt')
                    print("Model saved in file: %s" % save_path)

            # Do training update
            if FLAGS.save_stuff:
                summary, _ = sess.run([merged, train_op],
                                      feed_dict=get_feed(cifar10, True))
                train_writer.add_summary(summary, epoch)
            else:
                sess.run([train_op], feed_dict=get_feed(cifar10, True))

        # Print the final accuracy
        summary, acc, loss_val = \
            sess.run([merged, accuracy, loss], get_feed(cifar10, False))

        if FLAGS.save_stuff:
            test_writer.add_summary(summary, epoch + 1)
        print('\nFinal test accuracy:', acc, '\nFinal test loss    :',
              loss_val)

        save_path = saver.save(sess, FLAGS.checkpoint_dir + \
                               '/epoch'+ str(epoch + 1) + '.ckpt')
        print("Model saved in file: %s" % save_path)
Example #3
def feature_extraction():
    """
    This method restores a TensorFlow checkpoint file (.ckpt) and rebuilds the
    inference model with the restored parameters. From then on you can use
    that model in any way you want: for instance, for feature extraction,
    finetuning, or as a submodule of a larger architecture. Here, the method
    should extract features from a specified layer and store them in data
    files such as '.h5' or '.npy'/'.npz', depending on your preference. You
    will use those files later in the assignment.

    Args:
        [optional]
    Returns:
        None
    """

    ########################
    # PUT YOUR CODE HERE  #
    ########################

    tf.reset_default_graph()
    classes = [
        'plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship',
        'truck'
    ]

    tf.set_random_seed(42)
    np.random.seed(42)
    cifar10 = cifar10_utils.get_cifar10(FLAGS.data_dir)
    x_test, y_test = cifar10.test.images, cifar10.test.labels
    y_test = np.argmax(y_test, axis=1)
    input_data_dim = cifar10.test.images.shape[1]
    n_classes = 10
    cnn = ConvNet()
    cnn.dropout_rate = 1.0
    x = tf.placeholder(tf.float32,
                       shape=(None, input_data_dim, input_data_dim, 3),
                       name="x")
    y = tf.placeholder(tf.float32, shape=(None, n_classes), name="y")
    cnn.is_training = tf.placeholder(tf.bool)
    with tf.name_scope('train_cnn'):
        infs = cnn.inference(x)
        flatten = cnn.flatten
        fc1 = cnn.fc1
        fc2 = cnn.fc2

    with tf.Session() as sess:

        saver = tf.train.Saver()

        saver.restore(sess, FLAGS.checkpoint_dir + '/cnn_model.ckpt')

        # Grab the activations of all three layers in a single forward
        # pass over the full test set.
        flatten_features, fc1_features, fc2_features = sess.run(
            [flatten, fc1, fc2],
            feed_dict={x: x_test, cnn.is_training: False})

        # _plot_tsne("flatten.png", flatten_features, y_test)
        # _plot_tsne("fc1.png", fc1_features, y_test)
        # _plot_tsne("fc2.png", fc2_features, y_test)

    _train_one_vs_all(fc2_features, y_test, "FC2", classes)
    _train_one_vs_all(fc1_features, y_test, "FC1", classes)
    _train_one_vs_all(flatten_features, y_test, "Flatten", classes)
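The helpers _plot_tsne and _train_one_vs_all are not shown. A hypothetical reconstruction of _train_one_vs_all with scikit-learn, fitting a one-vs-rest linear classifier on the extracted features; the train/test split and classifier choice are assumptions:

    from sklearn.model_selection import train_test_split
    from sklearn.linear_model import LogisticRegression

    def _train_one_vs_all(features, labels, name, classes):
        # Hold out 20% of the test-set features to score the classifier;
        # 'classes' could be used for per-class reporting.
        x_tr, x_te, y_tr, y_te = train_test_split(
            features, labels, test_size=0.2, random_state=42)
        clf = LogisticRegression(multi_class='ovr').fit(x_tr, y_tr)
        print('%s features: one-vs-all accuracy = %.3f'
              % (name, clf.score(x_te, y_te)))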
Example #4
def train():
    """
    Performs training and evaluation of ConvNet model.

    First define your graph using class ConvNet and its methods. Then define
    necessary operations such as trainer (train_step in this case), savers
    and summarizers. Finally, initialize your model within a tf.Session and
    do the training.

    ---------------------------
    How to evaluate your model:
    ---------------------------
    Evaluation on the test set should be conducted over the full set, i.e.
    10k images, while it is fine to evaluate on a minibatch for the train set.

    ---------------------------------
    How often to evaluate your model:
    ---------------------------------
    - on training set every print_freq iterations
    - on test set every eval_freq iterations

    ------------------------
    Additional requirements:
    ------------------------
    You are also supposed to take snapshots of your model state (i.e. graph,
    weights, etc.) every checkpoint_freq iterations. For this, you should
    study TensorFlow's tf.train.Saver class. For more information, please
    check out:
    [https://www.tensorflow.org/versions/r0.11/how_tos/variables/index.html]
    """

    # Set the random seeds for reproducibility. DO NOT CHANGE.
    tf.set_random_seed(42)
    np.random.seed(42)

    ########################
    # PUT YOUR CODE HERE  #
    ########################

    cifar10 = cifar10_utils.get_cifar10(FLAGS.data_dir)
    x_test, y_test = cifar10.test.images[0:1000], cifar10.test.labels[0:1000]

    #### PARAMETERS
    classes = [
        'plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship',
        'truck'
    ]
    n_classes = len(classes)
    input_data_dim = cifar10.test.images.shape[1]
    #####

    cnn = ConvNet()
    cnn.is_training = tf.placeholder(tf.bool)
    cnn.dropout_rate = 0.5
    x = tf.placeholder(tf.float32,
                       shape=(None, input_data_dim, input_data_dim, 3),
                       name="x")
    y = tf.placeholder(tf.float32, shape=(None, n_classes), name="y")

    with tf.name_scope('train_cnn'):
        infs = cnn.inference(x)
        with tf.name_scope('cross-entropy-loss'):
            loss = cnn.loss(infs, y)
        with tf.name_scope('accuracy'):
            accuracy = cnn.accuracy(infs, y)
        fc2 = cnn.fc2
        merged = tf.merge_all_summaries()
        opt_operation = train_step(loss)

    with tf.Session() as sess:
        saver = tf.train.Saver()

        sess.run(tf.initialize_all_variables())

        test_acc = sess.run(accuracy,
                            feed_dict={
                                x: x_test,
                                y: y_test,
                                cnn.is_training: False
                            })
        print("Initial Test Accuracy = {0:.3f}".format(test_acc))

        train_writer = tf.train.SummaryWriter(FLAGS.log_dir + "/train/",
                                              sess.graph)
        test_writer = tf.train.SummaryWriter(FLAGS.log_dir + "/test/",
                                             sess.graph)

        for iteration in range(FLAGS.max_steps + 1):
            x_batch, y_batch = cifar10.train.next_batch(FLAGS.batch_size)
            _ = sess.run([opt_operation],
                         feed_dict={
                             x: x_batch,
                             y: y_batch,
                             # Enable dropout for the training update;
                             # cnn.dropout_rate is set to 0.5 above.
                             cnn.is_training: True
                         })

            if iteration % FLAGS.print_freq == 0:
                [train_acc, train_loss,
                 summary_train] = sess.run([accuracy, loss, merged],
                                           feed_dict={
                                               x: x_batch,
                                               y: y_batch,
                                               cnn.is_training: False
                                           })
                train_writer.add_summary(summary_train, iteration)
                print(
                    "Iteration {0:d}/{1:d}. Train Loss = {2:.3f}, Train Accuracy = {3:.3f}"
                    .format(iteration, FLAGS.max_steps, train_loss, train_acc))

            if iteration % FLAGS.eval_freq == 0:
                [test_acc, test_loss,
                 summary_test] = sess.run([accuracy, loss, merged],
                                          feed_dict={
                                              x: x_test,
                                              y: y_test,
                                              cnn.is_training: False
                                          })
                test_writer.add_summary(summary_test, iteration)
                print(
                    "Iteration {0:d}/{1:d}. Test Loss = {2:.3f}, Test Accuracy = {3:.3f}"
                    .format(iteration, FLAGS.max_steps, test_loss, test_acc))

            if iteration > 0 and iteration % FLAGS.checkpoint_freq == 0:
                saver.save(sess, FLAGS.checkpoint_dir + '/cnn_model.ckpt')

        train_writer.flush()
        test_writer.flush()
        train_writer.close()
        test_writer.close()

        test_acc = sess.run(accuracy,
                            feed_dict={
                                x: x_test,
                                y: y_test,
                                cnn.is_training: False
                            })
        print("Final Test Accuracy = {0:.3f}".format(test_acc))

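The docstring asks for test evaluation over the full 10k images, while the code above slices off the first 1000 (x_test = cifar10.test.images[0:1000]), presumably for memory reasons. A minimal sketch of full-set evaluation in equal-sized chunks, averaging per-chunk accuracy (the chunk size is an arbitrary choice):

    def full_test_accuracy(sess, accuracy, x, y, is_training_ph,
                           images, labels, chunk=1000):
        # Average accuracy over equal-sized chunks of the 10k test images;
        # feeding all 10k at once may exceed GPU memory.
        accs = []
        for i in range(0, len(images), chunk):
            accs.append(sess.run(accuracy, feed_dict={
                x: images[i:i + chunk],
                y: labels[i:i + chunk],
                is_training_ph: False}))
        return sum(accs) / len(accs)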
Example #5
def train():
    """
    Performs training and evaluation of ConvNet model.

    First define your graph using class ConvNet and its methods. Then define
    necessary operations such as trainer (train_step in this case), savers
    and summarizers. Finally, initialize your model within a tf.Session and
    do the training.

    ---------------------------
    How to evaluate your model:
    ---------------------------
    Evaluation on the test set should be conducted over the full set, i.e.
    10k images, while it is fine to evaluate on a minibatch for the train set.

    ---------------------------------
    How often to evaluate your model:
    ---------------------------------
    - on training set every print_freq iterations
    - on test set every eval_freq iterations

    ------------------------
    Additional requirements:
    ------------------------
    You are also supposed to take snapshots of your model state (i.e. graph,
    weights, etc.) every checkpoint_freq iterations. For this, you should
    study TensorFlow's tf.train.Saver class. For more information, please
    check out:
    [https://www.tensorflow.org/versions/r0.11/how_tos/variables/index.html]
    """

    # Set the random seeds for reproducibility. DO NOT CHANGE.
    tf.set_random_seed(42)
    np.random.seed(42)

    ########################
    # PUT YOUR CODE HERE  #
    ########################
    # First, let's test that our model works.

    # Initialisation:

    weight_init_scale = 0.001
    cifar10 = cifar10_utils.get_cifar10(validation_size=100)

    cnet = ConvNet(10)

    x_in = tf.placeholder(tf.float32, [None, 32, 32, 3])
    y_true = tf.placeholder(tf.float32, [None, 10])

    with tf.variable_scope("ConvNet", reuse=None):
        filter1 = tf.get_variable("filter1",
                                  initializer=tf.random_normal(
                                      [5, 5, 3, 64],
                                      stddev=weight_init_scale,
                                      dtype=tf.float32))
        filter2 = tf.get_variable("filter2",
                                  initializer=tf.random_normal(
                                      [5, 5, 64, 64],
                                      stddev=weight_init_scale,
                                      dtype=tf.float32))

        W1 = tf.get_variable("W1",
                             initializer=tf.random_normal(
                                 [4096, 384],
                                 stddev=weight_init_scale,
                                 dtype=tf.float32))
        W2 = tf.get_variable("W2",
                             initializer=tf.random_normal(
                                 [384, 192],
                                 stddev=weight_init_scale,
                                 dtype=tf.float32))
        W3 = tf.get_variable("W3",
                             initializer=tf.random_normal(
                                 [192, 10],
                                 stddev=weight_init_scale,
                                 dtype=tf.float32))

    sess = tf.Session()

    # Define the graph operations
    logits, flatten, fc1, fc2 = cnet.inference(x_in)

    loss = cnet.loss(logits, y_true)

    acc = cnet.accuracy(logits, y_true)
    opt_iter = train_step(loss)

    # Create the Saver only after the whole graph is built: a Saver only
    # covers variables that exist when it is constructed, so any variables
    # created by cnet.inference() would otherwise be missed.
    saver = tf.train.Saver()
    sess.run(tf.initialize_all_variables())

    swriter = tf.train.SummaryWriter(FLAGS.log_dir + '/ConvNet', sess.graph)

    # Build the validation-accuracy summary op once; re-creating it inside
    # the training loop would add a new op to the graph on every evaluation.
    acc_summary = tf.scalar_summary("accuracy", acc)

    #xbat, ybat = cifar10.train.next_batch(100)

    #begin the training
    with sess:

        # loop
        for i in range(FLAGS.max_steps + 1):
            xbat, ybat = cifar10.train.next_batch(FLAGS.batch_size)
            sess.run(opt_iter, feed_dict={x_in: xbat, y_true: ybat})
            if i % FLAGS.print_freq == 0:
                xbat, ybat = cifar10.validation.next_batch(100)
                val_acc, val_loss, acc_sum = sess.run(
                    [acc, loss, acc_summary],
                    feed_dict={x_in: xbat, y_true: ybat})

                sys.stderr.write("iteration : " + str(i) +
                                 ", validation loss : " + str(val_loss) +
                                 ", validation accuracy : " + str(val_acc) +
                                 "\n")
                swriter.add_summary(acc_sum, i)

            if i % FLAGS.checkpoint_freq == 0:
                # Reuse the tensors defined above; calling cnet.inference()
                # again here would add duplicate ops to the graph on every
                # checkpoint.
                lo, flatsave, fc1save, fc2save = sess.run(
                    [logits, flatten, fc1, fc2],
                    feed_dict={x_in: xbat, y_true: ybat})
                np.save(FLAGS.checkpoint_dir + "/ConvNet/flatten", flatsave)
                np.save(FLAGS.checkpoint_dir + "/ConvNet/fc1", fc1save)
                np.save(FLAGS.checkpoint_dir + "/ConvNet/fc2", fc2save)
                np.save(FLAGS.checkpoint_dir + "/ConvNet/labels", ybat)
                saver.save(
                    sess,
                    FLAGS.checkpoint_dir + "/ConvNet/" + "checkpoint.ckpt")

            if i % FLAGS.eval_freq == 0:
                xbat, ybat = cifar10.test.next_batch(100)

                sys.stderr.write(
                    "test accuracy:" +
                    str(sess.run(acc, feed_dict={
                        x_in: xbat,
                        y_true: ybat
                    })) + "\n")