Example #1
import time

import numpy as np
import tensorflow as tf

# ImageGenerator, my_LeNet, train_step, and evaluate are assumed to be
# provided by the surrounding project (import paths omitted here).
# Note: this code uses the TensorFlow 1.x API (tf.placeholder, tf.Session).
def my_training_task4(X_train,
                      y_train,
                      X_val,
                      y_val,
                      conv_featmap=[6],
                      fc_units=[84],
                      conv_kernel_size=[5],
                      pooling_size=[2],
                      l2_norm=0.01,
                      seed=235,
                      learning_rate=1e-3,
                      epoch=20,
                      batch_size=245,
                      verbose=False,
                      pre_trained_model=None):
    print("Building my LeNet. Parameters: ")
    print("conv_featmap={}".format(conv_featmap))
    print("fc_units={}".format(fc_units))
    print("conv_kernel_size={}".format(conv_kernel_size))
    print("pooling_size={}".format(pooling_size))
    print("l2_norm={}".format(l2_norm))
    print("seed={}".format(seed))
    print("learning_rate={}".format(learning_rate))

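    # Offline augmentation: stack transformed copies (flip 'hv', then a
    # 90-degree rotation) on top of the original training set, tripling
    # the number of training examples.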
    Train_aug = ImageGenerator(x=X_train, y=y_train)
    Train_aug_f = Train_aug.flip(mode='hv')
    Train_aug_r = Train_aug.rotate(angle=90)
    X_train_aug = np.vstack((X_train, Train_aug_f[0]))
    y_train_aug = np.hstack((y_train, Train_aug_f[1]))
    X_train_aug = np.vstack((X_train_aug, Train_aug_r[0]))
    y_train_aug = np.hstack((y_train_aug, Train_aug_r[1]))
    X_train = X_train_aug
    y_train = y_train_aug

    # define the variables and parameter needed during training
    with tf.name_scope('inputs'):
        xs = tf.placeholder(shape=[None, 32, 32, 3], dtype=tf.float32)
        ys = tf.placeholder(shape=[None], dtype=tf.int64)
        is_training = tf.placeholder(tf.bool, name='is_training')

    output, loss = my_LeNet(xs,
                            ys,
                            is_training,
                            img_len=32,
                            channel_num=3,
                            output_size=10,
                            conv_featmap=conv_featmap,
                            fc_units=fc_units,
                            conv_kernel_size=conv_kernel_size,
                            pooling_size=pooling_size,
                            l2_norm=l2_norm,
                            keep_prob=0.7,
                            seed=seed)

    iters = X_train.shape[0] // batch_size
    print('number of batches for training: {}'.format(iters))

    step = train_step(loss)  # NOTE: the learning_rate argument above is printed but never passed on
    eve = evaluate(output, ys)

    iter_total = 0
    best_acc = 0
    cur_model_name = 'lenet_{}'.format(int(time.time()))

    with tf.Session() as sess:
        merge = tf.summary.merge_all()

        writer = tf.summary.FileWriter("log/{}".format(cur_model_name),
                                       sess.graph)
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())

        # try to restore the pre_trained
        if pre_trained_model is not None:
            try:
                print("Load the model from: {}".format(pre_trained_model))
                saver.restore(sess, 'model/{}'.format(pre_trained_model))
            except Exception as exc:
                raise ValueError("Failed to load the pre-trained model!") from exc

        for epc in range(epoch):
            print("epoch {} ".format(epc + 1))

            for itr in range(iters):
                iter_total += 1

                training_batch_x = X_train[itr * batch_size:(1 + itr) *
                                           batch_size]
                training_batch_y = y_train[itr * batch_size:(1 + itr) *
                                           batch_size]

                _, cur_loss = sess.run([step, loss],
                                       feed_dict={
                                           xs: training_batch_x,
                                           ys: training_batch_y,
                                           is_training: True
                                       })

                if iter_total % 100 == 0:
                    # do validation
                    valid_eve, merge_result = sess.run([eve, merge],
                                                       feed_dict={
                                                           xs: X_val,
                                                           ys: y_val,
                                                           is_training: False
                                                       })
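                    # evaluate() is assumed to return the number of
                    # misclassified examples, so the error count is converted
                    # to a percentage accuracy below.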
                    valid_acc = 100 - valid_eve * 100 / y_val.shape[0]
                    if verbose:
                        print(
                            '{}/{} loss: {} validation accuracy : {}%'.format(
                                batch_size * (itr + 1), X_train.shape[0],
                                cur_loss, valid_acc))

                    # save the merge result summary
                    writer.add_summary(merge_result, iter_total)

                    # when we reach a new best validation accuracy, store the model parameters
                    if valid_acc > best_acc:
                        print(
                            'Best validation accuracy! iteration:{} accuracy: {}%'
                            .format(iter_total, valid_acc))
                        best_acc = valid_acc
                        saver.save(sess, 'model/{}'.format(cur_model_name))

    print(
        "Training ends. The best validation accuracy is {}. Model named {}.".format(
            best_acc, cur_model_name))
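A hypothetical invocation of Example #1, assuming CIFAR-10-shaped arrays; the variable names and values here are placeholders, not part of the original:

# Assumes X_train of shape (N, 32, 32, 3) float32 and y_train of shape (N,)
# with integer labels in [0, 10), e.g. CIFAR-10:
my_training_task4(X_train, y_train, X_val, y_val,
                  conv_featmap=[6],
                  fc_units=[84],
                  conv_kernel_size=[5],
                  pooling_size=[2],
                  epoch=20,
                  batch_size=245,
                  verbose=True)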
Example #2
# Example #2 relies on the same imports and helper definitions as Example #1.
def my_training_task4(X_train,
                      y_train,
                      X_val,
                      y_val,
                      conv_featmap=[6],
                      fc_units=[84],
                      conv_kernel_size=[5],
                      dropout_rate=[0.1],
                      pooling_size=[2],
                      l2_norm=0.01,
                      seed=235,
                      learning_rate=1e-2,
                      epoch=20,
                      batch_size=245,
                      verbose=False,
                      pre_trained_model=None):
    # A copy of my_training, modified to draw training batches from the
    # task-4 ImageGenerator instead of slicing the arrays directly.

    print("Building my LeNet. Parameters: ")
    print("conv_featmap={}".format(conv_featmap))
    print("fc_units={}".format(fc_units))
    print("conv_kernel_size={}".format(conv_kernel_size))
    print("pooling_size={}".format(pooling_size))
    print("l2_norm={}".format(l2_norm))
    print("seed={}".format(seed))
    print("learning_rate={}".format(learning_rate))

    imageGe = ImageGenerator(X_train, y_train)
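    # Instead of augmenting the whole set up front (Example #1), keep a
    # generator object and mutate its data between epochs (see the
    # augmentation schedule at the end of the epoch loop).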

    # define the variables and parameter needed during training
    with tf.name_scope('inputs'):
        xs = tf.placeholder(shape=[None, 32, 32, 3], dtype=tf.float32)
        ys = tf.placeholder(shape=[None], dtype=tf.int64)

    output, loss = my_LeNet(xs,
                            ys,
                            img_len=32,
                            channel_num=3,
                            output_size=10,
                            conv_featmap=conv_featmap,
                            fc_units=fc_units,
                            conv_kernel_size=conv_kernel_size,
                            dropout_rate=dropout_rate,
                            pooling_size=pooling_size,
                            l2_norm=l2_norm,
                            seed=seed)

    iters = X_train.shape[0] // batch_size
    print('number of batches for training: {}'.format(iters))

    step = train_step(loss)
    eve = evaluate(output, ys)

    iter_total = 0
    best_acc = 0
    cur_model_name = 'lenet_{}'.format(int(time.time()))

    with tf.Session() as sess:
        merge = tf.summary.merge_all()

        writer = tf.summary.FileWriter("log/{}".format(cur_model_name),
                                       sess.graph)
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())

        # try to restore the pre_trained
        if pre_trained_model is not None:
            try:
                print("Load the model from: {}".format(pre_trained_model))
                saver.restore(sess, 'model/{}'.format(pre_trained_model))
            except Exception:
                print("Failed to load the model; training from scratch.")

        for epc in range(epoch):
            print("epoch {} ".format(epc + 1))

            for itr in range(iters):
                iter_total += 1

                # Unlike my_training, draw batches from the ImageGenerator.
                # NOTE: a fresh generator is created each step and only its
                # first batch is taken; unless ImageGenerator tracks its
                # position internally, shuffle=False repeats the same batch.
                training_batch_x, training_batch_y = next(
                    imageGe.next_batch_gen(batch_size, shuffle=False))

                _, cur_loss = sess.run([step, loss],
                                       feed_dict={
                                           xs: training_batch_x,
                                           ys: training_batch_y
                                       })

                if iter_total % 100 == 0:
                    # do validation
                    valid_eve, merge_result = sess.run([eve, merge],
                                                       feed_dict={
                                                           xs: X_val,
                                                           ys: y_val
                                                       })
                    valid_acc = 100 - valid_eve * 100 / y_val.shape[0]
                    if verbose:
                        print(
                            '{}/{} loss: {} validation accuracy : {}%'.format(
                                batch_size * (itr + 1), X_train.shape[0],
                                cur_loss, valid_acc))

                    # save the merge result summary
                    writer.add_summary(merge_result, iter_total)

                    # when we reach a new best validation accuracy, store the model parameters
                    if valid_acc > best_acc:
                        print(
                            'Best validation accuracy! iteration:{} accuracy: {}%'
                            .format(iter_total, valid_acc))
                        best_acc = valid_acc
                        saver.save(sess, 'model/{}'.format(cur_model_name))
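            # Augmentation schedule: cycle through translate / rotate / flip,
            # then reset to the untouched training data every fourth epoch.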
            if epc % 4 == 1:
                imageGe.translate(4, 4)
            elif epc % 4 == 2:
                imageGe.rotate(90)
            elif epc % 4 == 3:
                imageGe.flip('hv')
            else:  # epc % 4 == 0: start the next cycle from clean data
                imageGe = ImageGenerator(X_train, y_train)

    print(
        "Training ends. The best validation accuracy is {}. Model named {}.".format(
            best_acc, cur_model_name))
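Both examples depend on an ImageGenerator class whose implementation is not shown. Below is a minimal sketch of the interface the call sites above appear to assume: flip/rotate/translate mutate the stored images in place and return an (x, y) tuple, and next_batch_gen yields batches indefinitely. This is an inference from the usage above, not the original class.

class ImageGenerator:
    """Minimal sketch of the interface assumed above (not the original)."""

    def __init__(self, x, y):
        # Keep private copies so mutations don't touch the caller's arrays.
        self.x = x.copy()
        self.y = y.copy()

    def flip(self, mode='h'):
        # 'h', 'v', or 'hv': flip along width, height, or both.
        # Note: this sketch mutates the stored images, so chained calls
        # (as in Example #1) compose transforms.
        if 'h' in mode:
            self.x = self.x[:, :, ::-1, :]
        if 'v' in mode:
            self.x = self.x[:, ::-1, :, :]
        return self.x, self.y

    def rotate(self, angle=0):
        # Rotate in 90-degree steps within the image plane.
        self.x = np.rot90(self.x, k=int(angle / 90) % 4, axes=(1, 2))
        return self.x, self.y

    def translate(self, shift_height, shift_width):
        # Circularly shift images by the given number of pixels.
        self.x = np.roll(self.x, (shift_height, shift_width), axis=(1, 2))
        return self.x, self.y

    def next_batch_gen(self, batch_size, shuffle=True):
        # Yield (x_batch, y_batch) tuples indefinitely.
        n = self.x.shape[0]
        while True:
            idx = np.random.permutation(n) if shuffle else np.arange(n)
            for start in range(0, n - batch_size + 1, batch_size):
                batch = idx[start:start + batch_size]
                yield self.x[batch], self.y[batch]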