Example #1
import time

import numpy as np
import tensorflow as tf

# my_LeNet, train_step, evaluate and ImageGenerator are assumed to be provided
# by the accompanying assignment modules.


def my_training_task4(X_train,
                      y_train,
                      X_val,
                      y_val,
                      conv_featmap=[6],
                      fc_units=[84],
                      conv_kernel_size=[5],
                      pooling_size=[2],
                      l2_norm=0.01,
                      seed=235,
                      learning_rate=1e-2,
                      epoch=20,
                      batch_size=245,
                      verbose=False,
                      pre_trained_model=None):
    # Variant of my_training that draws its training batches from the
    # ImageGenerator data-augmentation pipeline built in task 4.
    print("Building my LeNet. Parameters: ")
    print("conv_featmap={}".format(conv_featmap))
    print("fc_units={}".format(fc_units))
    print("conv_kernel_size={}".format(conv_kernel_size))
    print("pooling_size={}".format(pooling_size))
    print("l2_norm={}".format(l2_norm))
    print("seed={}".format(seed))
    print("learning_rate={}".format(learning_rate))
    num_of_samples, height, width, channels = X_train.shape
    output_size = np.unique(y_train).shape[0]

    # define the variables and parameters needed during training
    with tf.name_scope('inputs'):
        xs = tf.placeholder(shape=[None, height, width, channels],
                            dtype=tf.float32)
        ys = tf.placeholder(shape=[None], dtype=tf.int64)

    output, loss = my_LeNet(xs,
                            ys,
                            img_len=32,
                            channel_num=channels,
                            output_size=output_size,
                            conv_featmap=conv_featmap,
                            fc_units=fc_units,
                            conv_kernel_size=conv_kernel_size,
                            pooling_size=pooling_size,
                            l2_norm=l2_norm,
                            seed=seed)

    iters = num_of_samples // batch_size
    print('number of batches for training: {}'.format(iters))

    step = train_step(loss)
    eve = evaluate(output, ys)

    iter_total = 0
    best_acc = 0
    cur_model_name = 'lenet_{}'.format(int(time.time()))

    with tf.Session() as sess:
        merge = tf.summary.merge_all()

        writer = tf.summary.FileWriter("log/{}".format(cur_model_name),
                                       sess.graph)
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())

        # try to restore the pre-trained model if one was given
        if pre_trained_model is not None:
            try:
                print("Loading the model from: {}".format(pre_trained_model))
                saver.restore(sess, 'model/{}'.format(pre_trained_model))
            except Exception:
                print("Loading the model failed!")

        #bat = batch_size/4
        print("Generating data")
        origin = ImageGenerator(X_train, y_train)
        #flip = ImageGenerator(X_train,y_train)
        #flip.flip(mode='h')
        #noise = ImageGenerator(X_train,y_train)
        #noise.add_noise(portion=1, amplitude=1)
        #rotate = ImageGenerator(X_train,y_train)
        #rotate.rotate(angle=45)
        translate = ImageGenerator(X_train, y_train)
        generator = []
        generator.append(origin.next_batch_gen(batch_size))
        #generator.append(flip.next_batch_gen(batch_size))
        #generator.append(noise.next_batch_gen(batch_size))
        #generator.append(rotate.next_batch_gen(batch_size))
        generator.append(translate.next_batch_gen(batch_size))
        print("Data generation finished")

        for epc in range(epoch):
            print("epoch {} ".format(epc + 1))
            shift = np.random.choice(np.arange(1, 11), size=2, replace=True)
            translate = ImageGenerator(X_train, y_train)
            translate.translate(shift_height=shift[0], shift_width=shift[1])
            generator[-1] = translate.next_batch_gen(batch_size)

            for itr in range(iters):
                iter_total += 1
                #index = np.random.choice(X_train.shape[0],batch_size)

                #training_batch_x = X_train[index]
                #training_batch_y = y_train[index]
                training_batch_x, training_batch_y = next(
                    generator[itr % len(generator)])

                _, cur_loss = sess.run([step, loss],
                                       feed_dict={
                                           xs: training_batch_x,
                                           ys: training_batch_y
                                       })

                if iter_total % 100 == 0:
                    # do validation
                    valid_eve, merge_result = sess.run([eve, merge],
                                                       feed_dict={
                                                           xs: X_val,
                                                           ys: y_val
                                                       })
                    # eve counts the misclassified validation samples, so
                    # convert the error count into an accuracy percentage
                    valid_acc = 100 - valid_eve * 100 / y_val.shape[0]
                    if verbose:
                        print(
                            '{}/{} loss: {} validation accuracy : {}%'.format(
                                batch_size * (itr + 1), X_train.shape[0],
                                cur_loss, valid_acc))

                    # save the merge result summary
                    writer.add_summary(merge_result, iter_total)

                    # when we achieve the best validation accuracy, store the model parameters
                    if valid_acc > best_acc:
                        print(
                            'Best validation accuracy! iteration:{} accuracy: {}%'
                            .format(iter_total, valid_acc))
                        best_acc = valid_acc
                        saver.save(sess, 'model/{}'.format(cur_model_name))

    print("Training ends. The best validation accuracy is {}. Model named {}.".format(
        best_acc, cur_model_name))
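
A minimal driver for Example #1 might look like the sketch below. The synthetic CIFAR-10-style arrays and the call to tf.reset_default_graph() are illustrative assumptions, not part of the original example, and the hyperparameters simply reuse the function's defaults.

# Hypothetical driver (sketch only): synthetic data standing in for a real
# 32x32 RGB dataset with 10 classes.
import numpy as np
import tensorflow as tf

X_train = np.random.rand(980, 32, 32, 3).astype(np.float32)
y_train = np.random.randint(0, 10, size=980)
X_val = np.random.rand(200, 32, 32, 3).astype(np.float32)
y_val = np.random.randint(0, 10, size=200)

tf.reset_default_graph()  # start from a clean graph before building the model
my_training_task4(X_train, y_train, X_val, y_val,
                  conv_featmap=[6],
                  fc_units=[84],
                  conv_kernel_size=[5],
                  pooling_size=[2],
                  l2_norm=0.01,
                  learning_rate=1e-2,
                  epoch=2,
                  batch_size=245,
                  verbose=True)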
Example #2
import time

import tensorflow as tf

# my_LeNet, train_step, evaluate and ImageGenerator are assumed to be provided
# by the accompanying assignment modules.


def my_training_task4(X_train,
                      y_train,
                      X_val,
                      y_val,
                      conv_featmap=[6],
                      fc_units=[84],
                      conv_kernel_size=[5],
                      dropout_rate=[0.1],
                      pooling_size=[2],
                      l2_norm=0.01,
                      seed=235,
                      learning_rate=1e-2,
                      epoch=20,
                      batch_size=245,
                      verbose=False,
                      pre_trained_model=None):
    # Variant of my_training that draws its training batches from the
    # ImageGenerator built in task 4 and cycles through several augmentations
    # between epochs.

    print("Building my LeNet. Parameters: ")
    print("conv_featmap={}".format(conv_featmap))
    print("fc_units={}".format(fc_units))
    print("conv_kernel_size={}".format(conv_kernel_size))
    print("pooling_size={}".format(pooling_size))
    print("l2_norm={}".format(l2_norm))
    print("seed={}".format(seed))
    print("learning_rate={}".format(learning_rate))

    imageGe = ImageGenerator(X_train, y_train)

    # define the variables and parameters needed during training
    with tf.name_scope('inputs'):
        xs = tf.placeholder(shape=[None, 32, 32, 3], dtype=tf.float32)
        ys = tf.placeholder(shape=[None], dtype=tf.int64)

    output, loss = my_LeNet(xs,
                            ys,
                            img_len=32,
                            channel_num=3,
                            output_size=10,
                            conv_featmap=conv_featmap,
                            fc_units=fc_units,
                            conv_kernel_size=conv_kernel_size,
                            dropout_rate=dropout_rate,
                            pooling_size=pooling_size,
                            l2_norm=l2_norm,
                            seed=seed)

    iters = X_train.shape[0] // batch_size
    print('number of batches for training: {}'.format(iters))

    step = train_step(loss)
    eve = evaluate(output, ys)

    iter_total = 0
    best_acc = 0
    cur_model_name = 'lenet_{}'.format(int(time.time()))

    with tf.Session() as sess:
        merge = tf.summary.merge_all()

        writer = tf.summary.FileWriter("log/{}".format(cur_model_name),
                                       sess.graph)
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())

        # try to restore the pre-trained model if one was given
        if pre_trained_model is not None:
            try:
                print("Loading the model from: {}".format(pre_trained_model))
                saver.restore(sess, 'model/{}'.format(pre_trained_model))
            except Exception:
                print("Loading the model failed!")

        for epc in range(epoch):
            print("epoch {} ".format(epc + 1))

            # Create one batch generator per epoch so successive iterations walk
            # through the (possibly augmented) training data instead of always
            # pulling the first batch from a freshly created generator.
            batch_gen = imageGe.next_batch_gen(batch_size, shuffle=False)

            for itr in range(iters):
                iter_total += 1

                #training_batch_x = X_train[itr * batch_size: (1 + itr) * batch_size]
                #training_batch_y = y_train[itr * batch_size: (1 + itr) * batch_size]

                training_batch_x, training_batch_y = next(batch_gen)

                _, cur_loss = sess.run([step, loss],
                                       feed_dict={
                                           xs: training_batch_x,
                                           ys: training_batch_y
                                       })

                if iter_total % 100 == 0:
                    # do validation
                    valid_eve, merge_result = sess.run([eve, merge],
                                                       feed_dict={
                                                           xs: X_val,
                                                           ys: y_val
                                                       })
                    # eve counts the misclassified validation samples, so
                    # convert the error count into an accuracy percentage
                    valid_acc = 100 - valid_eve * 100 / y_val.shape[0]
                    if verbose:
                        print(
                            '{}/{} loss: {} validation accuracy : {}%'.format(
                                batch_size * (itr + 1), X_train.shape[0],
                                cur_loss, valid_acc))

                    # save the merge result summary
                    writer.add_summary(merge_result, iter_total)

                    # when we achieve the best validation accuracy, store the model parameters
                    if valid_acc > best_acc:
                        print(
                            'Best validation accuracy! iteration:{} accuracy: {}%'
                            .format(iter_total, valid_acc))
                        best_acc = valid_acc
                        saver.save(sess, 'model/{}'.format(cur_model_name))
            # At the end of each epoch, change the augmentation applied to the
            # training data: translate, then rotate, then flip (each applied on
            # top of the previous one), then reset to the original images.
            if epc % 4 == 1:
                imageGe.translate(4, 4)
            elif epc % 4 == 2:
                imageGe.rotate(90)
            elif epc % 4 == 3:
                imageGe.flip('hv')
            else:
                imageGe = ImageGenerator(X_train, y_train)

    print("Training ends. The best validation accuracy is {}. Model named {}.".format(
        best_acc, cur_model_name))
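
Both examples rely on the assignment's ImageGenerator class, whose implementation is not shown here. The sketch below is only an assumption about the next_batch_gen interface the training loops depend on: an endless generator that yields (x_batch, y_batch) pairs and cycles over the stored (possibly augmented) data. The class name ImageGeneratorSketch and all internals are hypothetical.

import numpy as np


class ImageGeneratorSketch(object):
    """Hypothetical stand-in for the task-4 ImageGenerator's batching behaviour."""

    def __init__(self, x, y):
        self.x = x.copy()
        self.y = y.copy()
        self.num_samples = x.shape[0]

    def next_batch_gen(self, batch_size, shuffle=True):
        # Endless generator: yields fixed-size batches and wraps around once
        # the data set is exhausted, reshuffling if requested.
        order = np.arange(self.num_samples)
        if shuffle:
            np.random.shuffle(order)
        start = 0
        while True:
            idx = order[start:start + batch_size]
            yield self.x[idx], self.y[idx]
            start += batch_size
            if start >= self.num_samples:
                start = 0
                if shuffle:
                    np.random.shuffle(order)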