Example #1
def run_experiment(n, large_data_set=False, generator=False):
    """
    Run an experiment. One experiment loads the dataset, trains the model, and outputs the evaluation metrics after
    training.
    :param n: Number of experiments to perform.
    :param large_data_set: Set to True if you want to save the large dataset to disk and use a generator for training.
    :param generator: Set to True if you want to use a generator to train the network.
    :return: n/a
    """

    for experiments in range(n):
        if large_data_set:
            save_data()
        else:
            data_train, data_test, data_val, info = load_data()
            data_train, data_test, data_val,\
            train_images, train_labels, test_images, test_labels,\
            val_images, val_labels = preprocess_data(data_train, data_test, data_val)
            visualize(data_train, data_test, info)

            # Fit the model on the training data and validate on the validation data.
            if generator:
                train_images = np.load(
                    '/home/mike/Documents/Alexnet_Client_Backend/file_names.npy'
                )
                train_labels = np.load(
                    '/home/mike/Documents/Alexnet_Client_Backend/oh_labels.npy'
                )
                train_data = DataGenerator(train_images, train_labels,
                                           batch_size)
            else:
                # Pack the image/label pairs into a tf.data.Dataset, shuffle the data, and specify the batch size.
                train_data = tf.data.Dataset.from_tensor_slices(
                    (train_images, train_labels))
                train_data = train_data.repeat().shuffle(6149).batch(100)

            test_data = tf.data.Dataset.from_tensor_slices(
                (test_images, test_labels))
            test_data = test_data.repeat().shuffle(1020).batch(batch_size)

            val_data = tf.data.Dataset.from_tensor_slices(
                (val_images, val_labels))
            val_data = val_data.batch(batch_size)

            # With the three datasets, the client script calls the AlexNet model (as a class) in alexnet.py.
            model = AlexNet(train_data, test_data, val_data)

            # Compile the model using the Adam optimizer and the categorical_crossentropy loss function.
            model.compile(optimizer='adam',
                          loss='categorical_crossentropy',
                          metrics=['acc', top_5_acc])

            # Print the model structure summary after the call above.
            model.summary()

            if generator:
                file_names = np.load(data_path + 'file_names.npy')
                num_files = file_names.shape[0]
                del file_names
                model.fit_generator(generator=train_data,
                                    steps_per_epoch=int(num_files // batch_size),
                                    epochs=epochs,
                                    verbose=verbose,
                                    validation_data=val_data)
            else:
                model.fit(train_data,
                          epochs=epochs,
                          validation_data=val_data,
                          verbose=verbose,
                          steps_per_epoch=steps_per_epoch)

            predictions(model, test_images, test_labels, num_examples=5)

            # Evaluate the model.
            loss, accuracy, top_5 = model.evaluate(test_data,
                                                   verbose=verbose,
                                                   steps=5)

            # Append the metrics to the score lists if you are running an experiment
            # that compares training over many iterations.
            loss_scores.append(loss)
            acc_scores.append(accuracy)
            top_5_acc_scores.append(top_5)

            # Print the mean, std, min, and max of the accuracy scores from the experiment.
            print(acc_scores)
            print('Mean_accuracy={}'.format(np.mean(acc_scores)),
                  'STD_accuracy={}'.format(np.std(acc_scores)))
            print('Min_accuracy={}'.format(np.min(acc_scores)),
                  'Max_accuracy={}'.format(np.max(acc_scores)))
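
The custom top_5_acc metric passed to model.compile above is not shown in this example; a minimal sketch of how such a metric could be defined with tf.keras, assuming one-hot encoded labels, might look like this:

import tensorflow as tf

# Hypothetical definition of the custom top-5 accuracy metric referenced in model.compile above.
def top_5_acc(y_true, y_pred):
    # Wrap the built-in top-k categorical accuracy with k=5.
    return tf.keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=5)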
Example #2


# Set up the test data generator, similar to the one above.
test_datagen = ImageDataGenerator(rescale=1.0/255)

test_generator = test_datagen.flow_from_directory(test_dir,
                                                  target_size=(image_width, image_height),
                                                  batch_size=Batch_Size,
                                                  class_mode='categorical')

test_num = test_generator.samples


# Evaluate the trained model and return both the loss and the test accuracy.
evals = model.evaluate(test_generator,
                       verbose=1,
                       steps=test_num//Batch_Size)

print("Loss = " + str(evals[0]))
print("Test Accuracy = " + str(evals[1]))


# Predict the classification; the implicit steps=7301 selects the specific number of images.
predict_datagen = ImageDataGenerator(rescale=1.0/255)

predict_generator = predict_datagen.flow_from_directory(predict_dir, 
                                                        target_size=(image_width,image_height),
                                                        batch_size=Batch_Size,
                                                        class_mode='categorical')

predict_num = predict_generator.samples
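
The actual prediction call is not shown above; a minimal sketch, assuming the trained model and the predict_generator defined here, could run the forward pass and map the predicted indices back to class names like this:

import numpy as np

# Run the forward pass over the prediction set (assumes predict_num is divisible by Batch_Size).
probabilities = model.predict(predict_generator,
                              steps=predict_num // Batch_Size,
                              verbose=1)

# Convert the softmax outputs to class indices and then to the directory/class names.
predicted_indices = np.argmax(probabilities, axis=1)
index_to_class = {v: k for k, v in predict_generator.class_indices.items()}
predicted_classes = [index_to_class[i] for i in predicted_indices]
print(predicted_classes[:10])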
Example #3
print('Reading CIFAR-10...')
X_train, Y_train, X_test, Y_test = read_cifar_10(image_width=INPUT_WIDTH,
                                                 image_height=INPUT_HEIGHT)

alexnet = AlexNet(input_width=INPUT_WIDTH,
                  input_height=INPUT_HEIGHT,
                  input_channels=INPUT_CHANNELS,
                  num_classes=NUM_CLASSES,
                  learning_rate=LEARNING_RATE,
                  momentum=MOMENTUM,
                  keep_prob=KEEP_PROB)

with tf.Session() as sess:
    print('Evaluating dataset...')
    print()

    sess.run(tf.global_variables_initializer())

    print('Loading model...')
    print()
    alexnet.restore(sess, './model')

    print('Evaluating...')

    train_accuracy = alexnet.evaluate(sess, X_train, Y_train, BATCH_SIZE)
    test_accuracy = alexnet.evaluate(sess, X_test, Y_test, BATCH_SIZE)

    print('Train Accuracy = {:.3f}'.format(train_accuracy))
    print('Test Accuracy = {:.3f}'.format(test_accuracy))
    print()
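
The AlexNet class used in this example is defined elsewhere; a minimal sketch of how its restore and evaluate methods could work in TF1 style, assuming (hypothetically) that the class exposes input placeholders self.X and self.Y, a keep-prob placeholder, and an accuracy op, might be:

def restore(self, sess, checkpoint_dir):
    # Restore the latest checkpoint saved under checkpoint_dir (hypothetical implementation).
    saver = tf.train.Saver()
    saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir))

def evaluate(self, sess, X_data, Y_data, batch_size):
    # Average the per-batch accuracy over the full dataset (hypothetical implementation).
    num_examples = len(X_data)
    total_accuracy = 0.0
    for offset in range(0, num_examples, batch_size):
        batch_x = X_data[offset:offset + batch_size]
        batch_y = Y_data[offset:offset + batch_size]
        acc = sess.run(self.accuracy,
                       feed_dict={self.X: batch_x,
                                  self.Y: batch_y,
                                  self.keep_prob: 1.0})
        total_accuracy += acc * len(batch_x)
    return total_accuracy / num_examples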
Example #4
EPOCHS = 5
MODEL_NAME = 'simple-alexnet-epoch{}.model'.format(EPOCHS)

print("[INFO] loading DATA...")
dataset = np.load(argv[1])
labelset = np.load(argv[2])
WIDTH = dataset.shape[1]
HEIGHT = dataset.shape[2]

data = dataset[:, :, :, np.newaxis]
(trainData, testData, trainLabels,
 testLabels) = train_test_split(data, labelset, test_size=0.1)

trainLabels = np_utils.to_categorical(trainLabels)
testLabels = np_utils.to_categorical(testLabels)

print("[INFO] compiling model...")
model = AlexNet(width=WIDTH, height=HEIGHT, lr=LR)

print("[INFO] training...")
model.fit({'input': trainData}, {'targets': trainLabels},
          validation_set=0.1,
          n_epoch=EPOCHS,
          snapshot_step=500,
          show_metric=False,
          run_id=MODEL_NAME)

print("[INFO] evaluating...")
accuracy = model.evaluate(testData, testLabels)[0]
print("[INFO] accuracy: {:.2f}%".format(accuracy * 100))