Example #1
def train_siamese():
    """
    Performs training and evaluation of the Siamese model.

    First define your graph using the Siamese class and its methods. Then
    define the necessary operations, such as the trainer (train_step in this
    case), savers and summarizers. Finally, initialize your model within a
    tf.Session and do the training.

    ---------------------------
    How to evaluate your model:
    ---------------------------
    On the train set it is fine to monitor the loss over minibatches. To
    evaluate on the test set, however, you will need to create a fixed
    validation set using the data sampling function you implement for the
    siamese architecture. Iterate over all minibatches in the validation set
    and report the average loss across them.

    ---------------------------------
    How often to evaluate your model:
    ---------------------------------
    - on the training set: every print_freq iterations
    - on the test set: every eval_freq iterations

    ------------------------
    Additional requirements:
    ------------------------
    You are also expected to take snapshots of your model state (i.e., graph,
    weights, etc.) every checkpoint_freq iterations. For this, you should
    study TensorFlow's tf.train.Saver class. For more information, please
    check out:
    [https://www.tensorflow.org/versions/r0.11/how_tos/variables/index.html]
    """

    # Set the random seeds for reproducibility. DO NOT CHANGE.
    tf.set_random_seed(42)
    np.random.seed(42)

    def _check_loss(data):
        loss_val = 0.
        for batch in data:
            x1_data, x2_data, y_data = batch
            [curr_loss] = sess.run([loss],
                                   feed_dict={
                                       x1: x1_data,
                                       x2: x2_data,
                                       y: y_data
                                   })
            loss_val += curr_loss
        return loss_val / len(data)

    cifar10 = get_cifar_10_siamese(FLAGS.data_dir, validation_size=5000)

    val_data = create_dataset_siamese(cifar10.validation, num_tuples=500)
    test_data = create_dataset_siamese(cifar10.test, num_tuples=500)

    #### PARAMETERS
    classes = [
        'plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship',
        'truck'
    ]
    n_classes = len(classes)
    input_data_dim = cifar10.test.images.shape[1]
    #####

    cnn_siamese = Siamese()

    x1 = tf.placeholder(tf.float32, shape=[None, 32, 32, 3], name="x1")
    x2 = tf.placeholder(tf.float32, shape=[None, 32, 32, 3], name="x2")
    y = tf.placeholder(tf.float32, shape=[None], name="y")

    with tf.name_scope('train_cnn'):
        infs1 = cnn_siamese.inference(x1)
        infs2 = cnn_siamese.inference(x2, reuse=True)
        with tf.name_scope('contrastive-loss'):
            loss = cnn_siamese.loss(infs1, infs2, y, 0.48)  # margin = 0.48
        merged = tf.merge_all_summaries()
        opt_operation = train_step(loss)

    with tf.Session() as sess:

        saver = tf.train.Saver()

        sess.run(tf.initialize_all_variables())

        val_losses = []
        train_losses = []
        for iteration in range(FLAGS.max_steps + 1):

            x1_train, x2_train, y_train = cifar10.train.next_batch(
                FLAGS.batch_size)
            _ = sess.run([opt_operation],
                         feed_dict={
                             x1: x1_train,
                             x2: x2_train,
                             y: y_train
                         })

            if iteration % FLAGS.print_freq == 0:
                [train_loss] = sess.run([loss],
                                        feed_dict={
                                            x1: x1_train,
                                            x2: x2_train,
                                            y: y_train
                                        })
                train_losses.append(train_loss)
                print("Iteration {0:d}/{1:d}. Train Loss = {2:.6f}".format(
                    iteration, FLAGS.max_steps, train_loss))

            if iteration % FLAGS.eval_freq == 0:
                val_loss = _check_loss(val_data)
                val_losses.append(val_loss)
                print(
                    "Iteration {0:d}/{1:d}. Validation Loss = {2:.6f}".format(
                        iteration, FLAGS.max_steps, val_loss))

            if iteration > 0 and iteration % FLAGS.checkpoint_freq == 0:
                saver.save(sess,
                           FLAGS.checkpoint_dir + '/cnn_model_siamese.ckpt')

        # Evaluate once on the fixed test tuples before the session closes;
        # the with-block closes the session, so no explicit close is needed.
        test_loss = _check_loss(test_data)
        print("Final Test Loss = {0:.3f}".format(test_loss))

    print("train_loss", train_losses)
    print("val_loss", val_losses)
Example #2
def train_siamese():
    """
    Performs training and evaluation of the Siamese model.

    First define your graph using the Siamese class and its methods. Then
    define the necessary operations, such as the trainer (train_step in this
    case), savers and summarizers. Finally, initialize your model within a
    tf.Session and do the training.
    
    ---------------------------
    How to evaluate your model:
    ---------------------------
    On the train set it is fine to monitor the loss over minibatches. To
    evaluate on the test set, however, you will need to create a fixed
    validation set using the data sampling function you implement for the
    siamese architecture. Iterate over all minibatches in the validation set
    and report the average loss across them.
    
    ---------------------------------
    How often to evaluate your model:
    ---------------------------------
    - on the training set: every print_freq iterations
    - on the test set: every eval_freq iterations
    
    ------------------------
    Additional requirements:
    ------------------------
    You are also expected to take snapshots of your model state (i.e., graph,
    weights, etc.) every checkpoint_freq iterations. For this, you should
    study TensorFlow's tf.train.Saver class. For more information, please
    check out:
    [https://www.tensorflow.org/versions/r0.11/how_tos/variables/index.html]
    """

    # Set the random seeds for reproducibility. DO NOT CHANGE.
    tf.set_random_seed(42)
    np.random.seed(42)

    ########################
    # PUT YOUR CODE HERE  #
    ########################

    # Cifar10 stuff
    cifar10, image_shape, num_classes = standard_cifar10_get(FLAGS)

    # Placeholder variables
    x1 = tf.placeholder(tf.float32,
                        shape=[None] + list(image_shape),
                        name='x1')
    x2 = tf.placeholder(tf.float32,
                        shape=[None] + list(image_shape),
                        name='x2')
    y = tf.placeholder(tf.float32, shape=[None], name='y')
    is_training = tf.placeholder(dtype=tf.bool, shape=(), name='isTraining')
    margin = tf.placeholder(tf.float32, shape=(), name='margin')

    # CNN model
    model = Siamese(is_training=is_training,
                    dropout_rate=FLAGS.dropout_rate,
                    save_stuff=FLAGS.save_stuff,
                    fc_reg_str=FLAGS.fc_reg_str)

    # Get outputs of two siamese models, loss, train optimisation step
    l2_out_1 = model.inference(x1)
    l2_out_2 = model.inference(x2, reuse=True)
    loss_no_reg, d2 = model.loss(l2_out_1, l2_out_2, y, margin)
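    # Regularisation terms are collected in GraphKeys.REGULARIZATION_LOSSES by
    # the layers that create them; summing the collection gives the weight penalty.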
    reg_loss = sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
    loss_w_reg = loss_no_reg + reg_loss
    accuracy = model.accuracy(d2, y, margin)
    tf.scalar_summary('loss_incl_reg', loss_w_reg)
    train_op = train_step(loss_w_reg)

    validation_tuples = create_dataset(
        cifar10.test,
        num_tuples=FLAGS.siamese_vali_ntuples,
        batch_size=FLAGS.batch_size,
        fraction_same=FLAGS.siamese_fraction_same)
    xv1, xv2, yv = np.vstack([i[0] for i in validation_tuples]),\
                   np.vstack([i[1] for i in validation_tuples]),\
                   np.hstack([i[2] for i in validation_tuples])

    num_val_chunks = 10
    assert (FLAGS.siamese_vali_ntuples % num_val_chunks) == 0
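    # Chunk start indices, plus the final end index (ntuples * batch_size).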
    chunks = list(range(0, xv1.shape[0], int(xv1.shape[0] / num_val_chunks))) + \
             [int(FLAGS.siamese_vali_ntuples * FLAGS.batch_size)]

    # Function for getting feed dicts
    def get_feed(c, train='train', chunk=None, chunks=None):
        if train == 'train' or train == 't':
            xd1, xd2, yd = \
                c.train.next_batch(FLAGS.batch_size, FLAGS.siamese_fraction_same)
            return {
                x1: xd1,
                x2: xd2,
                y: yd,
                is_training: True,
                margin: FLAGS.siamese_margin
            }
        elif train == 'vali' or train == 'v' or train == 'validation':
            if chunk is None:
                return {
                    x1: xv1,
                    x2: xv2,
                    y: yv,
                    is_training: False,
                    margin: FLAGS.siamese_margin
                }
            else:
                st, en = chunks[chunk], chunks[chunk + 1]
                return {
                    x1: xv1[st:en],
                    x2: xv2[st:en],
                    y: yv[st:en],
                    is_training: False,
                    margin: FLAGS.siamese_margin
                }
        else:
            # TODO Implement test set feed dict siamese
            raise ValueError("Unknown split: %r" % train)

    # For saving checkpoints
    saver = tf.train.Saver()

    with tf.Session() as sess:

        # Initialise all variables
        tf.initialize_all_variables().run(session=sess)

        # Merge all the summaries
        merged = tf.merge_all_summaries()
        if FLAGS.save_stuff:
            train_writer = tf.train.SummaryWriter(FLAGS.log_dir + '/train',
                                                  sess.graph)
            test_writer = tf.train.SummaryWriter(FLAGS.log_dir + '/test')

        # Start the training loop (each "epoch" here is one minibatch update)
        for epoch in range(0, FLAGS.max_steps):
            if epoch % FLAGS.eval_freq == 0:

                # Print accuracy and loss on validation set
                accuracies = []
                losses = []
                for i in range(num_val_chunks):
                    loss_val, acc = \
                        sess.run([loss_no_reg, accuracy],
                                 get_feed(cifar10, 'vali', i, chunks))
                    accuracies.append(acc)
                    losses.append(loss_val)


                print('\nEpoch', epoch, '\nValidation accuracy:',
                      np.mean(accuracies), '\nValidation loss    :',
                      np.mean(losses))

            if epoch % FLAGS.checkpoint_freq == 0:
                # Save model checkpoint
                if epoch > 0:
                    save_path = saver.save(sess, FLAGS.checkpoint_dir + \
                                           '/epoch'+ str(epoch) + '.ckpt')
                    print("Model saved in file: %s" % save_path)

            # Do training update
            if FLAGS.save_stuff:
                summary, _ = sess.run([merged, train_op],
                                      feed_dict=get_feed(cifar10, 'train'))
                train_writer.add_summary(summary, epoch)
            else:
                sess.run([train_op], feed_dict=get_feed(cifar10, 'train'))

        # Compute and print the final validation loss
        summary, loss_val = \
            sess.run([merged, loss_no_reg], get_feed(cifar10, 'vali'))

        if FLAGS.save_stuff:
            test_writer.add_summary(summary, epoch + 1)
        print('\nFinal validation loss    :', loss_val)
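
Note on Example #2: the checkpoints written by saver.save above can be loaded
back through the same tf.train.Saver API. A minimal sketch, assuming the
module's FLAGS is in scope (the epoch number in the path is illustrative, not
taken from the run):

import tensorflow as tf

saver = tf.train.Saver()  # built against the same graph as during training
with tf.Session() as sess:
    # restore() overwrites the variable values, so no initialisation is needed
    saver.restore(sess, FLAGS.checkpoint_dir + '/epoch1000.ckpt')
    # ...run evaluation ops against the restored weights here...
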
Example #3
def train_siamese():
    """
    Performs training and evaluation of the Siamese model.

    First define your graph using the Siamese class and its methods. Then
    define the necessary operations, such as the trainer (train_step in this
    case), savers and summarizers. Finally, initialize your model within a
    tf.Session and do the training.

    ---------------------------
    How to evaluate your model:
    ---------------------------
    On the train set it is fine to monitor the loss over minibatches. To
    evaluate on the test set, however, you will need to create a fixed
    validation set using the data sampling function you implement for the
    siamese architecture. Iterate over all minibatches in the validation set
    and report the average loss across them.

    ---------------------------------
    How often to evaluate your model:
    ---------------------------------
    - on the training set: every print_freq iterations
    - on the test set: every eval_freq iterations

    ------------------------
    Additional requirements:
    ------------------------
    You are also expected to take snapshots of your model state (i.e., graph,
    weights, etc.) every checkpoint_freq iterations. For this, you should
    study TensorFlow's tf.train.Saver class. For more information, please
    check out:
    [https://www.tensorflow.org/versions/r0.11/how_tos/variables/index.html]
    """

    # Set the random seeds for reproducibility. DO NOT CHANGE.
    tf.set_random_seed(42)
    np.random.seed(42)

    ########################
    # PUT YOUR CODE HERE  #
    ########################

    weight_init_scale = 0.001
    cifar10 = cifar10_siamese_utils.get_cifar10(validation_size=500)

    cnet = Siamese()

    x_anchor = tf.placeholder(tf.float32, [None, 32, 32, 3])
    x_in = tf.placeholder(tf.float32, [None, 32, 32, 3])
    y_true = tf.placeholder(tf.float32, [None])

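    # Weights are pre-created under the "Siamese" variable scope; cnet.inference
    # below is presumed to pick them up as the shared parameters of both towers.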
    with tf.variable_scope("Siamese", reuse=None):
        filter1 = tf.get_variable("filter1",
                                  initializer=tf.random_normal(
                                      [5, 5, 3, 64],
                                      stddev=weight_init_scale,
                                      dtype=tf.float32))
        filter2 = tf.get_variable("filter2",
                                  initializer=tf.random_normal(
                                      [5, 5, 64, 64],
                                      stddev=weight_init_scale,
                                      dtype=tf.float32))

        W1 = tf.get_variable("W1",
                             initializer=tf.random_normal(
                                 [4096, 384],
                                 stddev=weight_init_scale,
                                 dtype=tf.float32))
        W2 = tf.get_variable("W2",
                             initializer=tf.random_normal(
                                 [384, 192],
                                 stddev=weight_init_scale,
                                 dtype=tf.float32))

    # Define the graph: each inference call returns the logits followed by
    # intermediate activations; keep the full tuples for the snapshots below.
    anchor_tensors = cnet.inference(x_anchor)
    in_tensors = cnet.inference(x_in)
    logits_anchor, logits_in = anchor_tensors[0], in_tensors[0]

    loss = cnet.loss(logits_anchor, logits_in, y_true, 1.0)
    opt_iter = train_step(loss)

    # Create the Saver only after the full graph is built so that it captures
    # every variable, then initialise.
    saver = tf.train.Saver()
    sess = tf.Session()
    sess.run(tf.initialize_all_variables())

    # Begin the training.
    with sess:

        # loop
        for i in range(FLAGS.max_steps + 1):
            ancbat, xbat, ybat = cifar10.train.next_batch(FLAGS.batch_size)

            sess.run(opt_iter,
                     feed_dict={
                         x_anchor: ancbat,
                         x_in: xbat,
                         y_true: ybat
                     })
            if i % FLAGS.print_freq == 0:

                ancbat, xbat, ybat = cifar10.validation.next_batch(100)
                val_loss = sess.run(loss,
                                    feed_dict={
                                        x_anchor: ancbat,
                                        x_in: xbat,
                                        y_true: ybat
                                    })

                sys.stderr.write("iteration : " + str(i) +
                                 ", validation loss : " + str(val_loss) + "\n")

            if i % FLAGS.checkpoint_freq == 0:
                saver.save(
                    sess,
                    FLAGS.checkpoint_dir + "/Siamese/" + "checkpoint.ckpt")
                # Re-run the already-built towers instead of calling
                # cnet.inference again, which would add duplicate ops to the
                # graph at every checkpoint.
                lo, flatsave, fc1save, fc2save = sess.run(in_tensors,
                                                          feed_dict={
                                                              x_in: xbat,
                                                              y_true: ybat,
                                                              x_anchor: ancbat
                                                          })

                loa, flatsavea, fc1savea, fc2savea = sess.run(anchor_tensors,
                                                              feed_dict={
                                                                  x_in: xbat,
                                                                  y_true: ybat,
                                                                  x_anchor: ancbat
                                                              })

                np.save(FLAGS.checkpoint_dir + "/Siamese/other", lo)
                np.save(FLAGS.checkpoint_dir + "/Siamese/anchor", loa)
                """
                np.save(FLAGS.checkpoint_dir +"/Siamese/flatten", flatsave)
                np.save(FLAGS.checkpoint_dir + "/Siamese/fc1", fc1save)
                np.save(FLAGS.checkpoint_dir + "/Siamese/fc2", fc2save)
        
                np.save(FLAGS.checkpoint_dir +"/Siamese/flattena", flatsavea)
                np.save(FLAGS.checkpoint_dir + "/Siamese/fc1a", fc1savea)
                np.save(FLAGS.checkpoint_dir + "/Siamese/fc2a", fc2savea)
                """
            if i % FLAGS.eval_freq == 0:
                ancbat, xbat, ybat = cifar10.test.next_batch(100)

                sys.stderr.write("test loss:" + str(
                    sess.run(loss,
                             feed_dict={
                                 x_anchor: ancbat,
                                 x_in: xbat,
                                 y_true: ybat
                             })) + "\n")
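
Note on Example #3: the evaluations above use a single minibatch, while the
docstring asks for a fixed validation set whose loss is averaged over all
minibatches. A minimal sketch of that averaging, assuming a list of
(anchor, other, label) minibatch tuples from a create_dataset-style sampler
and the x_anchor, x_in, y_true placeholders defined in Example #3:

def average_loss(sess, loss_op, tuples):
    # Average the loss over every minibatch in the fixed evaluation set.
    total = 0.0
    for anc_batch, x_batch, y_batch in tuples:
        total += sess.run(loss_op, feed_dict={x_anchor: anc_batch,
                                              x_in: x_batch,
                                              y_true: y_batch})
    return total / len(tuples)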