Example #1

import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from alexnet import AlexNet  # user-defined AlexNet class in alexnet.py

BATCH_SIZE = 100
image_width = 227
image_height = 227
channels = 3
num_classes = 10

# Instantiate the AlexNet CNN model
model = AlexNet((image_width, image_height, channels), num_classes)

# Model configuration
model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
              loss='categorical_crossentropy',
              metrics=['acc'])

# Summary
model.summary()

# Directories for the datasets
train_dir = '/home/mike/Documents/image_gesture/dset_data/train'
val_dir = '/home/mike/Documents/image_gesture/dset_data/validation'
test_dir = '/home/mike/Documents/image_gesture/dset_data/test'

# Preprocess the images: rescale pixel values to [0, 1]
train_datagen = ImageDataGenerator(rescale=1.0 / 255)

train_generator = train_datagen.flow_from_directory(train_dir,
                                                    target_size=(image_width,
                                                                 image_height),
                                                    batch_size=BATCH_SIZE,
                                                    class_mode='categorical')
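
Example #1 stops after building the training generator. A minimal sketch of how training would typically continue from here, reusing the val_dir defined above and the same rescaling; the epoch count is illustrative, not from the original:

# Validation images are rescaled the same way as the training images.
val_datagen = ImageDataGenerator(rescale=1.0 / 255)

val_generator = val_datagen.flow_from_directory(val_dir,
                                                target_size=(image_width,
                                                             image_height),
                                                batch_size=BATCH_SIZE,
                                                class_mode='categorical')

# Fit on the directory iterators; 10 epochs is an illustrative choice.
model.fit(train_generator,
          epochs=10,
          validation_data=val_generator)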
Example #2

import numpy as np
import tensorflow as tf

# The helper functions (save_data, load_data, preprocess_data, visualize,
# predictions), the DataGenerator class, the top_5_acc metric, and the
# module-level settings (batch_size, epochs, verbose, steps_per_epoch,
# data_path, loss_scores, acc_scores, top_5_acc_scores) are assumed to be
# defined elsewhere in the script.

def run_experiment(n, large_data_set=False, generator=False):
    """
    Run an experiment. One experiment loads the dataset, trains the model, and outputs the evaluation metrics after
    training.
    :param n: Number of experiments to perform
    :param large_data_set: Set to True if you want to save the large dataset to hard disk and use a generator for training
    :param generator: Set to True if you want to use a generator to train the network.
    :return: n/a
    """

    for _ in range(n):
        if large_data_set:
            save_data()
        else:
            data_train, data_test, data_val, info = load_data()
            (data_train, data_test, data_val,
             train_images, train_labels, test_images, test_labels,
             val_images, val_labels) = preprocess_data(data_train, data_test, data_val)
            visualize(data_train, data_test, info)

            # Build the training data, fit the model, and validate on the validation split.
            if generator:
                train_images = np.load(
                    '/home/mike/Documents/Alexnet_Client_Backend/file_names.npy'
                )
                train_labels = np.load(
                    '/home/mike/Documents/Alexnet_Client_Backend/oh_labels.npy'
                )
                train_data = DataGenerator(train_images, train_labels,
                                           batch_size)
            else:
                # Pack the image/label pairs into a tf.data.Dataset, shuffle the full
                # training set, and batch it (batch_size, for consistency with the other splits).
                train_data = tf.data.Dataset.from_tensor_slices(
                    (train_images, train_labels))
                train_data = train_data.repeat().shuffle(6149).batch(batch_size)

            # Build tf.data pipelines for the test and validation splits.
            test_data = tf.data.Dataset.from_tensor_slices(
                (test_images, test_labels))
            test_data = test_data.repeat().shuffle(1020).batch(batch_size)

            val_data = tf.data.Dataset.from_tensor_slices(
                (val_images, val_labels))
            val_data = val_data.batch(batch_size)

            # Pass the three datasets to the AlexNet model (a class) defined in alexnet.py.
            model = AlexNet(train_data, test_data, val_data)

            # Compile the model with the Adam optimizer and the
            # categorical_crossentropy loss, tracking top-1 and top-5 accuracy.
            model.compile(optimizer='adam',
                          loss='categorical_crossentropy',
                          metrics=['acc', top_5_acc])

            # Print the model structure summary.
            model.summary()

            if generator:
                file_names = np.load(data_path + 'file_names.npy')
                num_files = file_names.shape[0]
                del file_names
                # fit_generator is deprecated in TF 2.x; model.fit also
                # accepts generators directly.
                model.fit_generator(generator=train_data,
                                    steps_per_epoch=int(num_files //
                                                        batch_size),
                                    epochs=epochs,
                                    verbose=verbose,
                                    validation_data=val_data)
            else:
                model.fit(train_data,
                          epochs=epochs,
                          validation_data=val_data,
                          verbose=verbose,
                          steps_per_epoch=steps_per_epoch)

            predictions(model, test_images, test_labels, num_examples=5)

            # Evaluate the model on the test data.
            loss, accuracy, top_5 = model.evaluate(test_data,
                                                   verbose=verbose,
                                                   steps=5)

            # Append the metrics to the score lists, for experiments that compare
            # training across many iterations.
            loss_scores.append(loss)
            acc_scores.append(accuracy)
            top_5_acc_scores.append(top_5)

            # Print the mean, std, min, and max of the test accuracy scores from the experiment.
            print(acc_scores)
            print('Mean_accuracy={}'.format(np.mean(acc_scores)),
                  'STD_accuracy={}'.format(np.std(acc_scores)))
            print('Min_accuracy={}'.format(np.min(acc_scores)),
                  'Max_accuracy={}'.format(np.max(acc_scores)))
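
Example #2 depends on two helpers that are not shown: the top_5_acc metric passed to model.compile and the DataGenerator class used on the generator path. The sketches below are plausible minimal implementations, not the project's actual code; the Sequence-based generator assumes each entry in file_names points to a preprocessed image saved as a .npy file.

def top_5_acc(y_true, y_pred):
    # Standard top-5 categorical accuracy, suitable for metrics=['acc', top_5_acc].
    return tf.keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=5)


class DataGenerator(tf.keras.utils.Sequence):
    # Minimal Keras Sequence that loads image batches from .npy files on disk
    # (the file format and array shapes are assumptions, not from the original).
    def __init__(self, file_names, labels, batch_size):
        self.file_names = file_names
        self.labels = labels
        self.batch_size = batch_size

    def __len__(self):
        # Number of full batches per epoch.
        return len(self.file_names) // self.batch_size

    def __getitem__(self, idx):
        # Slice out one batch of file names and labels, then load the images.
        batch_files = self.file_names[idx * self.batch_size:(idx + 1) * self.batch_size]
        batch_labels = self.labels[idx * self.batch_size:(idx + 1) * self.batch_size]
        images = np.array([np.load(f) for f in batch_files])
        return images, batch_labels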