Example #1
def run_experiment(n, large_data_set=False, generator=False):
    """
    Run an experiment. One experiment loads the dataset, trains the model, and outputs the evaluation metrics after
    training.
    :param n: Number of experiments to perform
    :param large_data_set: Set to True if you want to save the large dataset to hard disk and use a generator for training
    :param generator: Set to True if you want to use a generator to train the network.
    :return: n/a
    """

    # Score lists for the metrics collected from each run (initialized here so the snippet is self-contained).
    loss_scores, acc_scores, top_5_acc_scores = [], [], []

    for _ in range(n):
        if large_data_set:
            save_data()
        else:
            data_train, data_test, data_val, info = load_data()
            data_train, data_test, data_val,\
            train_images, train_labels, test_images, test_labels,\
            val_images, val_labels = preprocess_data(data_train, data_test, data_val)
            visualize(data_train, data_test, info)

            # Fit the model on the training data and validate on the validation data.
            if generator:
                train_images = np.load(
                    '/home/mike/Documents/Alexnet_Client_Backend/file_names.npy'
                )
                train_labels = np.load(
                    '/home/mike/Documents/Alexnet_Client_Backend/oh_labels.npy'
                )
                train_data = DataGenerator(train_images, train_labels,
                                           batch_size)
            else:
                # Make the image, label pairs into a tf.data.Dataset, shuffle the data and specify the batch size.
                train_data = tf.data.Dataset.from_tensor_slices(
                    (train_images, train_labels))
                train_data = train_data.repeat().shuffle(6149).batch(100)

            # Build the test and validation pipelines; these are shared by both the generator
            # and the in-memory branches, so they sit at the loop level.
            test_data = tf.data.Dataset.from_tensor_slices(
                (test_images, test_labels))
            test_data = test_data.repeat().shuffle(1020).batch(batch_size)

            val_data = tf.data.Dataset.from_tensor_slices(
                (val_images, val_labels))
            val_data = val_data.batch(batch_size)

            # With the three datasets, the client script calls the AlexNet model (a class) in alexnet.py.
            model = AlexNet(train_data, test_data, val_data)

            # Compile the model using the Adam optimizer and the categorical_crossentropy loss function.
            # top_5_acc is a custom metric; see the sketch after this function for a possible definition.
            model.compile(optimizer='adam',
                          loss='categorical_crossentropy',
                          metrics=['acc', top_5_acc])

            # Print the model structure summary once the model has been built.
            model.summary()

            if generator:
                file_names = np.load(data_path + 'file_names.npy')
                num_files = file_names.shape[0]
                del file_names
                # In TF2, model.fit() accepts generators directly; fit_generator() is deprecated.
                model.fit(train_data,
                          steps_per_epoch=int(num_files // batch_size),
                          epochs=epochs,
                          verbose=verbose,
                          validation_data=val_data)
            else:
                model.fit(train_data,
                          epochs=epochs,
                          validation_data=val_data,
                          verbose=verbose,
                          steps_per_epoch=steps_per_epoch)

            predictions(model, test_images, test_labels, num_examples=5)

            # Evaluate the model.
            loss, accuracy, top_5 = model.evaluate(test_data,
                                                   verbose=verbose,
                                                   steps=5)

            # Append the metrics to the score lists, in case the experiment compares training
            # over many iterations.
            loss_scores.append(loss)
            acc_scores.append(accuracy)
            top_5_acc_scores.append(top_5)

            # Print the mean, std, min, and max of the test accuracy scores from the experiment.
            print(acc_scores)
            print('Mean_accuracy={}'.format(np.mean(acc_scores)),
                  'STD_accuracy={}'.format(np.std(acc_scores)))
            print('Min_accuracy={}'.format(np.min(acc_scores)),
                  'Max_accuracy={}'.format(np.max(acc_scores)))
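The snippet references a custom top_5_acc metric and a DataGenerator class that are defined elsewhere in the repository (batch_size, epochs, verbose, steps_per_epoch and data_path are module-level settings in the original). A minimal sketch of what the two missing pieces might look like, assuming tf.keras, one-hot labels, and one .npy file per image; this is illustrative, not the author's actual code:

import math
import numpy as np
import tensorflow as tf

# Top-5 categorical accuracy, exposed under the name used in compile() above.
def top_5_acc(y_true, y_pred):
    return tf.keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=5)

class DataGenerator(tf.keras.utils.Sequence):
    """Yield batches of images loaded on the fly from the file names saved by save_data()."""

    def __init__(self, file_names, labels, batch_size):
        self.file_names = file_names
        self.labels = labels
        self.batch_size = batch_size

    def __len__(self):
        return math.ceil(len(self.file_names) / self.batch_size)

    def __getitem__(self, idx):
        names = self.file_names[idx * self.batch_size:(idx + 1) * self.batch_size]
        batch_y = self.labels[idx * self.batch_size:(idx + 1) * self.batch_size]
        batch_x = np.stack([np.load(name) for name in names])
        return batch_x, batch_y

# Example call: five end-to-end runs on the in-memory dataset.
# run_experiment(5)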
Example #2
# Get the batch shape
for data_batch, label_batch in train_generator:
    print("data batch shape:", data_batch.shape)
    print("label batch shape:", label_batch)

    break

# Set up the TensorBoard callback
log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
callback_list = [tensorboard_callback]

# Sample counts taken from the generators (not defined in the original snippet).
train_num = train_generator.samples
val_num = validation_generator.samples

# Train the model
history = model.fit(train_generator,
                    steps_per_epoch=train_num // BATCH_SIZE,
                    epochs=EPOCHS,
                    validation_data=validation_generator,
                    validation_steps=val_num // BATCH_SIZE,
                    callbacks=callback_list)

# Save the model
# model.save('/home/mike/Documents/image_gesture/leapGestRecog_small_categorical.h5')

# Evaluate the model by visualizing the result
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))

plt.figure(figsize=(15, 4))
plt.subplot(1, 2, 1)
# (Completion assumed: plot the accuracy and loss curves side by side.)
plt.plot(epochs, acc, 'b', label='Training accuracy')
plt.plot(epochs, val_acc, 'r', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(epochs, loss, 'b', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()

Example #3
# The terminal shows the number of images belonging to 2 classes.
train_num = train_generator.samples
valid_num = valid_generator.samples

# Need to start the following command in Ubuntu Terminal after executing the script.
# tensorboard --logdir logs/fit
log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
callback_list = [tensorboard_callback]

# Set verbose=1 (or verbose=0) for a visible (or invisible) training procedure.
model.fit(train_generator,
          epochs=EPOCHS,
          steps_per_epoch=train_num // BATCH_SIZE,
          validation_data=valid_generator,
          validation_steps=valid_num // BATCH_SIZE,
          callbacks=callback_list,
          verbose=1)

# The system saves the whole model into the directory /home/mike/Documents/AlexNet-tf2/content. The
# saved my_model.h5 has quite a big size of 748.6 MB.
model.save(model_dir)

# To view the diagrams, users need to upload the Python script into Jupyter Notebook and run
# the script, or directly upload and run the original IPython script.
class_names = ['bike', 'ship']
x_valid, label_batch = next(iter(valid_generator))
# predict_classes() was removed in recent TF2 releases; argmax over predict() is equivalent.
prediction_values = np.argmax(model.predict(x_valid), axis=-1)

# The plot will be realized in the Jupyter Notebook after running the script in either Python or
# IPython.
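The plotting code itself is not included in the snippet. A minimal sketch of how the predicted classes could be visualized, assuming x_valid holds rescaled RGB images and matplotlib is imported as plt:

plt.figure(figsize=(10, 5))
for i in range(min(8, x_valid.shape[0])):
    plt.subplot(2, 4, i + 1)
    plt.axis('off')
    plt.imshow(x_valid[i])
    plt.title(class_names[prediction_values[i]])
plt.show()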
Example #4
base_path = "dataset"
train_h5_file = "food_c101_n10099_r64x64x3.h5"
test_h5_file = "food_test_c101_n1000_r64x64x3.h5"

X_train, y_train, X_test, y_test = read_from_h5(base_path, train_h5_file,
                                                test_h5_file)

from keras.callbacks import ModelCheckpoint

checkpoint = ModelCheckpoint('best_model.h5',
                             monitor='val_loss',
                             save_best_only=True,
                             mode='auto')
steps_per_epoch = int(len(y_train) / BATCH_SIZE)  # 300
validation_steps = int(len(y_test) / BATCH_SIZE)  # 90

# Training
model_info = model.fit(X_train,
                       y_train,
                       validation_data=(X_test, y_test),
                       batch_size=BATCH_SIZE,
                       shuffle="batch",
                       epochs=NUM_EPOCH,
                       callbacks=[checkpoint],
                       verbose=1)

model.save("food_classification.h5")

# plot model history after each epoch
from visulization import plot_model_history
plot_model_history(model_info)
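The read_from_h5 helper is defined elsewhere in the repository. A minimal sketch, assuming the dataset keys 'images' and 'category' used by the public food_c101_* HDF5 files (verify the key names against your own files):

import os
import h5py
import numpy as np

def read_from_h5(base_path, train_h5_file, test_h5_file):
    # Load the image and one-hot label arrays from the two HDF5 files.
    with h5py.File(os.path.join(base_path, train_h5_file), 'r') as f:
        X_train = np.asarray(f['images'])
        y_train = np.asarray(f['category'])
    with h5py.File(os.path.join(base_path, test_h5_file), 'r') as f:
        X_test = np.asarray(f['images'])
        y_test = np.asarray(f['category'])
    return X_train, y_train, X_test, y_test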
Example #5
import os.path
import wget

from sklearn.model_selection import train_test_split
from dataset import Dataset
from alexnet import AlexNet


print('Loading data..')
images, labels = Dataset('res/data1', AlexNet.SCALE_SIZE, mean_image=AlexNet.MEAN_IMAGE).load()

print('Splitting into train&validation..')
X_train, X_val, y_train, y_val = train_test_split(images, labels)

print('Looking for pre-trained weights..')
weights_file = 'res/alexnet-caffemodel.npy'
if not os.path.exists(weights_file):
    # Download straight to the expected path so the existence check finds it on the next run.
    wget.download('https://www.dropbox.com/s/ekgz9jtj1ybtxmj/alexnet-caffemodel.npy?dl=1', weights_file)
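The weights file is the classic Caffe AlexNet repackaged as a NumPy archive. A quick way to inspect the downloaded file before fine-tuning (the dict-of-layers layout and the [weights, biases] pairing are assumptions based on the common repackaging):

import numpy as np

weights = np.load('res/alexnet-caffemodel.npy', allow_pickle=True, encoding='bytes').item()
for layer_name, params in weights.items():
    # Each entry typically holds a [weights, biases] pair for one layer.
    print(layer_name, [p.shape for p in params])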

print('Fine-tuning AlexNet')
alex_net = AlexNet(labels.shape[1], weights_file)
alex_net.fit(X_train, X_val, y_train, y_val, freeze=True, epochs=1000, lr=0.001)

Example #6
# (Reconstructed head of the snippet: the train generator is assumed to mirror the test
# generator further down.)
train_datagen = ImageDataGenerator(rescale=1.0/255)

train_generator = train_datagen.flow_from_directory(train_dir,
                                                    target_size=(image_width, image_height),
                                                    class_mode='categorical')

train_num = train_generator.samples


# Please start the following TensorBoard command in the Ubuntu Terminal after executing the script.
# $ tensorboard --logdir logs/fit
log_dir="logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
callback_list = [tensorboard_callback]


# Set verbose=1 (or verbose=0) for a visible (or invisible) training procedure.
model.fit(train_generator,
          epochs=EPOCHS,
          steps_per_epoch=train_num//BATCH_SIZE,
          callbacks=callback_list,
          verbose=1)


# The test generator is set up in the same way as the train generator above.
test_datagen = ImageDataGenerator(rescale=1.0/255)

test_generator = test_datagen.flow_from_directory(test_dir, 
                                                  target_size=(image_width,image_height), 
                                                  class_mode='categorical')

test_num = test_generator.samples


# Evaluate the trained model and return both the loss and the test accuracy.
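The snippet ends before the evaluation call itself. A minimal completion, assuming a tf.keras model compiled with an accuracy metric:

loss, accuracy = model.evaluate(test_generator,
                                steps=test_num // BATCH_SIZE,
                                verbose=1)
print('Test loss: {:.4f}, test accuracy: {:.4f}'.format(loss, accuracy))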
Example #7
# Imports and constants assumed by this snippet (the original shows neither the imports nor LR).
from sys import argv

import numpy as np
from sklearn.model_selection import train_test_split
from keras.utils import np_utils

from alexnet import AlexNet  # a TFLearn-style AlexNet factory; see the sketch after this example

LR = 1e-3  # learning rate (assumed value)
EPOCHS = 5
MODEL_NAME = 'simple-alexnet-epoch{}.model'.format(EPOCHS)

print("[INFO] loading DATA...")
dataset = np.load(argv[1])
labelset = np.load(argv[2])
WIDTH = dataset.shape[1]
HEIGHT = dataset.shape[2]

data = dataset[:, :, :, np.newaxis]
(trainData, testData, trainLabels,
 testLabels) = train_test_split(data, labelset, test_size=0.1)

trainLabels = np_utils.to_categorical(trainLabels)
testLabels = np_utils.to_categorical(testLabels)

print("[INFO] compiling model...")
model = AlexNet(width=WIDTH, height=HEIGHT, lr=LR)

print("[INFO] training...")
model.fit({'input': trainData}, {'targets': trainLabels},
          validation_set=0.1,
          n_epoch=EPOCHS,
          snapshot_step=500,
          show_metric=False,
          run_id=MODEL_NAME)

print("[INFO] evaluating...")
accuracy = model.evaluate(testData, testLabels)[0]
print("[INFO] accuracy: {:.2f}%".format(accuracy * 100))