Example #1
import time

import numpy as np
import tensorflow as tf

import tools
# The network building blocks used below (VGG_10, Dense_Spatial_Block,
# Spatial_Channel_Aware_Block, Dense_Temporal_Block,
# Temporal_Channel_Aware_Block, dilated_conv2d, resize_and_adjust_channel,
# output_adjust) are assumed to be defined elsewhere in the project.


def Training(batch_SIZE=None, time_step=None, Epoch=None, lr=None):

    X_train, Y_train, X_test, Y_test = tools.DataLoader(data_aug=True)

    VGG = VGG_10()

    # 5-D input and ground-truth placeholders: (batch, time, height, width, channel).
    x = tf.compat.v1.placeholder(dtype=tf.float32,
                                 shape=[batch_SIZE, time_step, 158, 238, 1])
    y = tf.compat.v1.placeholder(dtype=tf.float32,
                                 shape=[batch_SIZE, time_step, 158, 238, 1])

    # Merge the batch and time axes so frames can pass through 2-D conv layers.
    x_reshape = tf.reshape(x, [-1, 158, 238, 1])

    # The learning rate is fed at run time so it can be decayed during training.
    LR = tf.compat.v1.placeholder(tf.float32)

    # Upsample, expand to 3 channels, and run the VGG front end.
    z = resize_and_adjust_channel(x_reshape, [316, 476], 3, "Start")
    z = VGG.forward(z)

    # Stage 1: spatial block on the merged (batch * time) frames.
    S_1 = Dense_Spatial_Block(z, "DSB_1")
    S_1 = Spatial_Channel_Aware_Block(S_1, "SCA_1")
    z_1 = S_1

    # Fold the time axis back in before the temporal block.
    # (TensorFlow uniquifies the repeated "Reshape_S_T" op name automatically.)
    z = tf.reshape(z_1, [
        batch_SIZE, time_step,
        tf.shape(z)[1],
        tf.shape(z)[2],
        tf.shape(z)[3]
    ],
                   name="Reshape_S_T")

    # Stage 1: temporal block across frames.
    T_1 = Dense_Temporal_Block(z, "DTB_1")
    T_1 = Temporal_Channel_Aware_Block(T_1, "TCA_1")
    z_1 = T_1

    # Merge batch and time again for the next spatial stage.
    z = tf.reshape(z_1, [-1, tf.shape(z)[2], tf.shape(z)[3], tf.shape(z)[4]])

    # Stages 2-4 repeat the same spatial -> temporal pattern.
    S_2 = Dense_Spatial_Block(z, "DSB_2")
    S_2 = Spatial_Channel_Aware_Block(S_2, "SCA_2")
    z_2 = S_2

    z = tf.reshape(z_2, [
        batch_SIZE, time_step,
        tf.shape(z)[1],
        tf.shape(z)[2],
        tf.shape(z)[3]
    ],
                   name="Reshape_S_T")

    T_2 = Dense_Temporal_Block(z, "DTB_2")
    T_2 = Temporal_Channel_Aware_Block(T_2, "TCA_2")
    z_2 = T_2

    z = tf.reshape(z_2, [-1, tf.shape(z)[2], tf.shape(z)[3], tf.shape(z)[4]])

    S_3 = Dense_Spatial_Block(z, "DSB_3")
    S_3 = Spatial_Channel_Aware_Block(S_3, "SCA_3")
    z_3 = S_3

    z = tf.reshape(z_3, [
        batch_SIZE, time_step,
        tf.shape(z)[1],
        tf.shape(z)[2],
        tf.shape(z)[3]
    ],
                   name="Reshape_S_T")

    T_3 = Dense_Temporal_Block(z, "DTB_3")
    T_3 = Temporal_Channel_Aware_Block(T_3, "TCA_3")
    z_3 = T_3

    z = tf.reshape(z_3, [-1, tf.shape(z)[2], tf.shape(z)[3], tf.shape(z)[4]])

    S_4 = Dense_Spatial_Block(z, "DSB_4")
    S_4 = Spatial_Channel_Aware_Block(S_4, "SCA_4")
    z_4 = S_4

    z = tf.reshape(z_4, [
        batch_SIZE, time_step,
        tf.shape(z)[1],
        tf.shape(z)[2],
        tf.shape(z)[3]
    ],
                   name="Reshape_S_T")

    T_4 = Dense_Temporal_Block(z, "DTB_4")
    T_4 = Temporal_Channel_Aware_Block(T_4, "TCA_4")
    z_4 = T_4

    z = tf.reshape(z_4, [-1, tf.shape(z)[2], tf.shape(z)[3], tf.shape(z)[4]])

    # Output head: reduce channels and map back to a single-channel map
    # at the input resolution.
    z = dilated_conv2d(z, 512, 128, 1, "128")
    z = dilated_conv2d(z, 128, 64, 1, "64")
    z = output_adjust(z, [158, 238], 1, "End")
    z = tf.reshape(z, [batch_SIZE, time_step, 158, 238, 1])

    # L2 weight regularization over all trainable variables.
    all_variable = tf.compat.v1.trainable_variables()
    Reg_Loss = 1e-4 * tf.reduce_sum([tf.nn.l2_loss(v) for v in all_variable])

    cost = tools.PRL(z, y, "L1")
    cost = cost + Reg_Loss

    performance = tools.compute_MAE_and_MSE(z, y)

    optimizer = tf.compat.v1.train.AdamOptimizer(
        learning_rate=LR).minimize(cost)

    initial = tf.compat.v1.global_variables_initializer()

    with tf.compat.v1.Session() as sess:

        print(
            "-----------------------------------------------------------------------------\n"
        )
        print("\nStart Training...\n")
        print(
            "Number of parameters : ",
            np.sum([
                np.prod(v.get_shape().as_list())
                for v in tf.compat.v1.trainable_variables()
            ]), "\n")

        Time = 0
        seed = 0

        # Summary writers (no summaries are actually added in this snippet).
        train_writer = tf.compat.v1.summary.FileWriter("./logs/train",
                                                       sess.graph)
        test_writer = tf.compat.v1.summary.FileWriter("./logs/test",
                                                      sess.graph)

        # Freeze the graph so no new ops are added inside the training loop.
        sess.graph.finalize()
        sess.run(initial)

        for epoch in range(Epoch + 1):

            # Halve the learning rate at epochs 30, 60 and 100.
            if epoch in (30, 60, 100):
                lr = lr / 2

            start_time = time.time()

            mini_batch_cost = 0
            mini_batch_MAE = 0
            mini_batch_MSE = 0

            seed = seed + 1

            minibatches = tools.random_mini_batches(X_train,
                                                    Y_train,
                                                    batch_SIZE,
                                                    seed=seed)

            for data in minibatches:

                (X_train_batch, Y_train_batch) = data

                _, temp_cost, train_performance = sess.run(
                    [optimizer, cost, performance],
                    feed_dict={
                        x: X_train_batch,
                        y: Y_train_batch,
                        LR: lr,
                    })

                # Accumulate metrics normalized by the total number of frames.
                mini_batch_cost += temp_cost * batch_SIZE * time_step / (
                    X_train.shape[0] * X_train.shape[1])
                mini_batch_MAE += train_performance[0] / (X_train.shape[0] *
                                                          X_train.shape[1])
                mini_batch_MSE += train_performance[1] / (X_train.shape[0] *
                                                          X_train.shape[1])

            total_cost = round(mini_batch_cost, 7)
            total_MAE = round(mini_batch_MAE, 4)
            # The accumulated squared error is rooted here, so the reported
            # "MSE" is actually an RMSE.
            total_MSE = round(np.sqrt(mini_batch_MSE), 4)

            print("Epoch : ", epoch, " , Cost :  ", total_cost, " , MAE : ",
                  total_MAE, ", MSE : ", total_MSE)

            # Evaluate on the test set after every epoch.
            test_cost, test_MAE, test_MSE = 0, 0, 0
            test_batches = tools.random_mini_batches(X_test,
                                                     Y_test,
                                                     batch_SIZE,
                                                     seed=seed)

            for data in test_batches:

                (X_test_batch, Y_test_batch) = data

                # LR is not fed here: only the cost and metrics are fetched.
                temp_cost, test_performance = sess.run([cost, performance],
                                                       feed_dict={
                                                           x: X_test_batch,
                                                           y: Y_test_batch,
                                                       })

                test_cost += temp_cost * batch_SIZE * time_step / (
                    X_test.shape[0] * X_test.shape[1])
                test_MAE += test_performance[0] / (X_test.shape[0] *
                                                   X_test.shape[1])
                test_MSE += test_performance[1] / (X_test.shape[0] *
                                                   X_test.shape[1])

            test_cost = round(test_cost, 7)
            test_MAE = round(test_MAE, 4)
            test_MSE = round(np.sqrt(test_MSE), 4)

            print("Testing , cost :  ", test_cost, " , MAE : ", test_MAE,
                  " , MSE : ", test_MSE, "\n")

            # Running average of the per-epoch wall-clock time.
            process_time = time.time() - start_time
            Time = Time + (process_time - Time) / (epoch + 1)

            if epoch % 5 == 0:

                print("Average training time  per epoch : ", Time)

    print("Done.\n")
Example #2
import tensorflow as tf

import tools
import rot_mnist12K_model
# BATCH_SIZE, TEST_CHUNK_SIZE, TRAIN_FILENAME, TEST_FILENAME,
# NUMBER_OF_CLASSES, NUMBER_OF_TRANSFORMATIONS, NUMBER_OF_FILTERS,
# NUMBER_OF_FC_FEATURES, LOADED_SIZE, DESIRED_SIZE and ADAM_LEARNING_RATE
# are assumed to be defined earlier in the original (truncated) script.

x = tf.placeholder(
    tf.float32,
    shape=[None, DESIRED_SIZE, DESIRED_SIZE, 1, NUMBER_OF_TRANSFORMATIONS])
y_gt = tf.placeholder(tf.float32, shape=[None, NUMBER_OF_CLASSES])
keep_prob = tf.placeholder(tf.float32)
logits = rot_mnist12K_model.define_model(x, keep_prob, NUMBER_OF_CLASSES,
                                         NUMBER_OF_FILTERS,
                                         NUMBER_OF_FC_FEATURES)
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_gt))
train_step = tf.train.AdamOptimizer(ADAM_LEARNING_RATE).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y_gt, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# run training
session = tf.Session()
session.run(tf.global_variables_initializer())
train_data_loader = tools.DataLoader(TRAIN_FILENAME, NUMBER_OF_CLASSES,
                                     NUMBER_OF_TRANSFORMATIONS, LOADED_SIZE,
                                     DESIRED_SIZE)
test_data_loader = tools.DataLoader(TEST_FILENAME, NUMBER_OF_CLASSES,
                                    NUMBER_OF_TRANSFORMATIONS, LOADED_SIZE,
                                    DESIRED_SIZE)
test_size = test_data_loader.all()[1].shape[0]
assert test_size % TEST_CHUNK_SIZE == 0
number_of_test_chunks = test_size // TEST_CHUNK_SIZE  # integer chunk count
while True:
    batch = train_data_loader.next_batch(BATCH_SIZE)
    if train_data_loader.is_new_epoch():
        train_accuracy = session.run(accuracy,
                                     feed_dict={
                                         x: batch[0],
                                         y_gt: batch[1],
                                         keep_prob: 1.0
                                     })
        print("train accuracy: %g" % train_accuracy)
    # The source snippet is truncated here; a training step along these
    # lines would normally follow (the keep_prob value is an assumption).
    session.run(train_step,
                feed_dict={x: batch[0], y_gt: batch[1], keep_prob: 0.5})
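
The snippet computes number_of_test_chunks but is cut off before using it. A minimal sketch of how the chunked test evaluation might look, assuming test_data_loader.all() returns the full (images, labels) pair as the test_size line above suggests; this is an illustration, not the original code:

# all() is assumed to return the full (images, labels) arrays.
test_images, test_labels = test_data_loader.all()
test_accuracy = 0.0
for chunk in range(number_of_test_chunks):
    begin = chunk * TEST_CHUNK_SIZE
    end = begin + TEST_CHUNK_SIZE
    test_accuracy += session.run(accuracy,
                                 feed_dict={
                                     x: test_images[begin:end],
                                     y_gt: test_labels[begin:end],
                                     keep_prob: 1.0
                                 })
print("test accuracy: %g" % (test_accuracy / number_of_test_chunks))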
Example #3
from time import time

import numpy as np

from sklearn.model_selection import KFold

import tools
import models
import metrics
import kernels
from penalizations import kernel_ridge, grad_kernel_ridge, ridge, grad_ridge

initial_time = time()

print("Cross-validation script", end="\n\n")

path_to_data = "data/"
data_loader = tools.DataLoader(path_to_data=path_to_data)

kernel_method = True

if kernel_method:
    y_train = data_loader.load_labels_only("Ytr.csv")

    # Compute first kernel
    X_train_1 = np.load("data/hog_grey_cell_2_block_4_Xtr.npy")
    poly_kernel_parameters_1 = {
        "gamma": .01,
        "d": 6.,
        "r": 12.0,
    }
    K_train_1 = kernels.polynomial_kernel_train(
        X_train_1, **poly_kernel_parameters_1)
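
The implementation of kernels.polynomial_kernel_train is not shown in this snippet. Given the gamma, d and r parameters above, it presumably computes the standard polynomial kernel K(x, x') = (gamma * <x, x'> + r)^d over the training points; a minimal sketch under that assumption (the body below is illustrative, not the original implementation):

import numpy as np

def polynomial_kernel_train(X, gamma, d, r):
    # Gram matrix of the polynomial kernel over the training points:
    # K[i, j] = (gamma * <X[i], X[j]> + r) ** d
    return (gamma * (X @ X.T) + r) ** d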