Example #1
import numpy as np
import tensorflow as tf
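
# NOTE: load_data, preprocess_data, visualize, save_data, predictions and the
# DataGenerator class, as well as the globals batch_size, epochs, verbose,
# steps_per_epoch, data_path and the loss_scores/acc_scores/top_5_acc_scores lists,
# are project-specific pieces not shown in this excerpt. The custom top-5 metric
# passed to model.compile() below is also project code; a minimal stand-in
# (an assumption, not the original helper) could be:
def top_5_acc(y_true, y_pred):
    return tf.keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=5)
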
def run_experiment(n, large_data_set=False, generator=False):
    """
    Run an experiment. One experiment loads the dataset, trains the model, and outputs the evaluation metrics after
    training.
    :param n: Number of experiments to perform
    :param large_data_set: Set to True if you want to save the large dataset to disk and use a generator for training
    :param generator: Set to True if you want to use a generator to train the network.
    :return: n/a
    """

    for experiments in range(n):
        if large_data_set:
            save_data()
        else:
            data_train, data_test, data_val, info = load_data()
            data_train, data_test, data_val,\
            train_images, train_labels, test_images, test_labels,\
            val_images, val_labels = preprocess_data(data_train, data_test, data_val)
            visualize(data_train, data_test, info)

            # Build the training data (as a generator or a tf.data pipeline), then fit
            # the model on it and validate on the validation data.
            if generator:
                train_images = np.load(
                    '/home/mike/Documents/Alexnet_Client_Backend/file_names.npy'
                )
                train_labels = np.load(
                    '/home/mike/Documents/Alexnet_Client_Backend/oh_labels.npy'
                )
                train_data = DataGenerator(train_images, train_labels,
                                           batch_size)
            else:
                # Pack the image/label pairs into a tf.data.Dataset, shuffle the data,
                # and set the batch size.
                train_data = tf.data.Dataset.from_tensor_slices(
                    (train_images, train_labels))
                train_data = train_data.repeat().shuffle(6149).batch(100)

                test_data = tf.data.Dataset.from_tensor_slices(
                    (test_images, test_labels))
                test_data = test_data.repeat().shuffle(1020).batch(batch_size)

                val_data = tf.data.Dataset.from_tensor_slices(
                    (val_images, val_labels))
                val_data = val_data.batch(batch_size)

                # With these three datasets, the client script instantiates the AlexNet
                # model (a class defined in alexnet.py).
                model = AlexNet(train_data, test_data, val_data)

                # Compile the model using Adam optimizer and categorical_crossentropy loss function.
                model.compile(optimizer='adam',
                              loss='categorical_crossentropy',
                              metrics=['acc', top_5_acc])

                # Print a summary of the model architecture.
                model.summary()

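                # Note: model.fit_generator() is deprecated in TF 2.x; model.fit()
                # accepts a keras.utils.Sequence generator directly and can be used instead.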
                if generator:
                    file_names = np.load(data_path + 'file_names.npy')
                    num_files = file_names.shape[0]
                    del file_names
                    model.fit_generator(generator=train_data,
                                        steps_per_epoch=int(num_files //
                                                            batch_size),
                                        epochs=epochs,
                                        verbose=verbose,
                                        validation_data=val_data)
                else:
                    model.fit(train_data,
                              epochs=epochs,
                              validation_data=val_data,
                              verbose=verbose,
                              steps_per_epoch=steps_per_epoch)

                predictions(model, test_images, test_labels, num_examples=5)

                # Evaluate the model
                loss, accuracy, top_5 = model.evaluate(test_data,
                                                       verbose=verbose,
                                                       steps=5)

                # Append the metrics to the score lists in case the experiment compares
                # training across many iterations.
                loss_scores.append(loss)
                acc_scores.append(accuracy)
                top_5_acc_scores.append(top_5)

                # Print the mean, std, min, and max of the test accuracy scores from the experiment.
                print(acc_scores)
                print('Mean_accuracy={}'.format(np.mean(acc_scores)),
                      'STD_accuracy={}'.format(np.std(acc_scores)))
                print('Min_accuracy={}'.format(np.min(acc_scores)),
                      'Max_accuracy={}'.format(np.max(acc_scores)))

# Allow GPU memory to grow on demand instead of pre-allocating all of it
# (memory growth must be configured before the GPUs are initialized).
for gpu in tf.config.experimental.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, True)
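
# Example usage (an assumption; the actual call site is not shown in this excerpt):
#   run_experiment(5)                   # five runs using the in-memory tf.data pipeline
#   run_experiment(1, generator=True)   # one run trained through the DataGenerator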

Example #2

import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from alexnet import AlexNet  # project-specific model class, as in the other examples

# Assign the global arguments
EPOCHS = 32
BATCH_SIZE = 100
image_width = 227
image_height = 227
channels = 3
num_classes = 10

# Instantiate the CNN/AlexNet model
model = AlexNet((image_width, image_height, channels), num_classes)

# Model configuration
model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
              loss='categorical_crossentropy',
              metrics=['acc'])

# Summary
model.summary()

# Directories for the datasets
train_dir = '/home/mike/Documents/image_gesture/dset_data/train'
val_dir = '/home/mike/Documents/image_gesture/dset_data/validation'
test_dir = '/home/mike/Documents/image_gesture/dset_data/test'

# Preprocess the images
train_datagen = ImageDataGenerator(rescale=1.0 / 255)

train_generator = train_datagen.flow_from_directory(train_dir,
                                                    target_size=(image_width,
                                                                 image_height))
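
# A typical continuation (an assumption, not the original code) builds matching
# validation and test generators the same way and trains with model.fit, e.g.:
#
#   val_generator = train_datagen.flow_from_directory(val_dir,
#                                                     target_size=(image_width, image_height))
#   model.fit(train_generator, epochs=EPOCHS, validation_data=val_generator)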
Example #3
from model import cnn_model
from alexnet import AlexNet
from keras.optimizers import Adam

# Variables
CLASSES = 101
IMAGE_SIZE = 64
CHANNELS = 3
NUM_EPOCH = 500
LEARN_RATE = 1.0e-4
BATCH_SIZE = 32

# Model Architecture and Compilation
model = AlexNet(CLASSES, IMAGE_SIZE, CHANNELS)
adam = Adam(lr=LEARN_RATE, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0)
model.compile(optimizer=adam,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Image Preprocessing

from get_train_test import read_from_h5

base_path = "dataset"
train_h5_file = "food_c101_n10099_r64x64x3.h5"
test_h5_file = "food_test_c101_n1000_r64x64x3.h5"

X_train, y_train, X_test, y_test = read_from_h5(base_path, train_h5_file,
                                                test_h5_file)

from keras.callbacks import ModelCheckpoint
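
# A minimal sketch (an assumption, not the original training code) of how the
# ModelCheckpoint callback is typically wired into model.fit for this setup:
#
#   checkpoint = ModelCheckpoint('alexnet_food101_best.h5', monitor='val_loss',
#                                save_best_only=True, verbose=1)
#   model.fit(X_train, y_train,
#             batch_size=BATCH_SIZE,
#             epochs=NUM_EPOCH,
#             validation_data=(X_test, y_test),
#             callbacks=[checkpoint])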