Example #1
    def test_layers_connected(self):
        train_dir = os.path.join(file_dir, 'unit_test_images/')
        validation_dir = os.path.join(file_dir, 'unit_test_images/')
        test_dir = os.path.join(file_dir, 'unit_test_images/')

        model = rt.KerasInception(dense_layers=1, dropout=0, dense_dim=1024)

        # build the model graph (including the dense layer) by training for 0 epochs
        model.train(
            train_dir=train_dir,
            validation_dir=validation_dir,
            epochs=0,
        )

        self.assertTrue(
            np.array_equal(
                model.model.get_layer('base_output').output,
                model.model.get_layer('pooling').input))
        self.assertTrue(
            np.array_equal(
                model.model.get_layer('pooling').output,
                model.model.get_layer('dense0').input))
        self.assertTrue(
            np.array_equal(
                model.model.get_layer('dense0').output,
                model.model.get_layer('softmax').input))
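
The assertions above depend on fixed layer names ('base_output', 'pooling', 'dense0', 'softmax'). If a name lookup fails, the standard Keras API can list what the graph actually contains; a quick sketch, assuming model.model is a plain Keras Model:

        # inspect the tail of the graph to verify the expected layer names
        for layer in model.model.layers[-4:]:
            print(layer.name, layer.output_shape)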
Example #2
    def test_full_training(self):
        path_of_zip = os.path.join(file_dir, 'unit_test_images_zipped.zip')
        train_dir = rt.unzip_and_return_path_to_folder(path_of_zip)

        # dummy directories with black/white images
        validation_dir = os.path.join(file_dir, 'unit_test_images/')
        test_dir = os.path.join(file_dir, 'unit_test_images/')

        # train
        model = rt.KerasInception(dense_layers=1, dropout=0, dense_dim=1024)

        model.train(train_dir=train_dir,
                    validation_dir=validation_dir,
                    fine_tune=True,
                    epochs=2,
                    salt_pepper=True,
                    classes_txt_dir=os.getcwd(),
                    unfrozen_layers=311,
                    steps_per_epoch=1000)

        # evaluate
        score = model.evaluate(test_dir=test_dir)

        print("accuracy on test images")
        print(score[1])
        # check if significantly better than random
        self.assertTrue(score[1] > 0.6)
Example #3
    def test_base_model_stable(self):
        # dummy directories with black/white images
        train_dir = os.path.join(file_dir, 'unit_test_images/')
        validation_dir = os.path.join(file_dir, 'unit_test_images/')
        test_dir = os.path.join(file_dir, 'unit_test_images/')

        # initialize two lists for weights
        before_transferred_weights = []
        after_transferred_weights = []

        # pretrained base model, kept as an unchanged reference
        base_model = rt.InceptionV3(weights='imagenet', include_top=False)

        # create model
        model = rt.KerasInception(dense_layers=1, dropout=0.01, dense_dim=1024)

        # build the model graph (including the dense layer) by training for 0 epochs
        model.train(
            train_dir=train_dir,
            validation_dir=validation_dir,
            epochs=0,
            augmentation_params=rt.get_augmentation_params(1),
        )

        # store the weights of all transferred layers before training
        for _, layer_fm in zip(base_model.layers, model.model.layers):
            before_transferred_weights.append(layer_fm.get_weights())

        # train
        model.train(train_dir=train_dir,
                    validation_dir=validation_dir,
                    epochs=1,
                    salt_pepper=True,
                    classes_txt_dir=os.getcwd(),
                    unfrozen_layers=0,
                    augmentation_params=rt.get_augmentation_params(0),
                    steps_per_epoch=1000)

        # store all weights after
        for _, layer_fm in zip(base_model.layers, model.model.layers):
            after_transferred_weights.append(layer_fm.get_weights())

        # check that no weight array changed in any of the transferred layers
        # (compare array by array; np.array_equal misbehaves on ragged lists)
        for b, a in zip(before_transferred_weights, after_transferred_weights):
            for w_before, w_after in zip(b, a):
                self.assertTrue(np.array_equal(w_before, w_after))
Example #4
    def test_training_last_layers(self):
        # dummy directories with black/white images
        # (the original joined file_dir twice, which breaks for relative paths)
        train_dir = os.path.join(file_dir, 'unit_test_images/')
        validation_dir = os.path.join(file_dir, 'unit_test_images/')
        test_dir = os.path.join(file_dir, 'unit_test_images/')

        # create model
        model = rt.KerasInception(dense_layers=1, dropout=0, dense_dim=1024)

        # build the model graph (including the dense layer) by training for 0 epochs
        model.train(
            train_dir=train_dir,
            validation_dir=validation_dir,
            epochs=0,
        )

        # store weights before
        before_softmax = model.model.layers[-1].get_weights()
        before_dense = model.model.layers[-2].get_weights()

        # train
        model.train(train_dir=train_dir,
                    validation_dir=validation_dir,
                    fine_tune=True,
                    epochs=1,
                    salt_pepper=True,
                    classes_txt_dir=os.getcwd(),
                    unfrozen_layers=311,
                    steps_per_epoch=1000,
                    validation_dir_2=validation_dir)

        # store weights after
        after_softmax = model.model.layers[-1].get_weights()
        after_dense = model.model.layers[-2].get_weights()

        # check that at least one weight array changed in each trained layer
        self.assertFalse(all(np.array_equal(b, a)
                             for b, a in zip(before_softmax, after_softmax)))
        self.assertFalse(all(np.array_equal(b, a)
                             for b, a in zip(before_dense, after_dense)))
Example #5
def main():
    train_dir = 'PATH/TO/DIRECTORY'
    validation_dir = 'PATH/TO/DIRECTORY'
    test_dir = 'PATH/TO/DIRECTORY'

    # a second directory can be added if two validation sets are needed
    extra_validation_dir = None

    dense_layers = 1
    input_dim = 224
    batch_size = 64
    # if True, two Inception layers are trained for one epoch at the end of training
    fine_tune = False
    # if True, salt-and-pepper noise is added to the training images
    add_salt_pepper_noise = False
    augmentation_mode = 0
    epochs = 10
    # must be an integer between 0 and 311
    unfrozen_layers = 311
    learning_rate = 0.0031622777

    model = rt.KerasInception(input_dim=input_dim,
                              batch_size=batch_size,
                              dense_layers=dense_layers,
                              lr=learning_rate)

    model.train(
        train_dir=train_dir,
        validation_dir=validation_dir,
        fine_tune=fine_tune,
        epochs=epochs,
        unfrozen_layers=unfrozen_layers,
        salt_pepper=add_salt_pepper_noise,
        augmentation_params=rt.get_augmentation_params(augmentation_mode),
        validation_dir_2=extra_validation_dir)

    model.evaluate(test_dir=test_dir)

    model.save_model('model.h5')
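
Once saved, the model can be reloaded for inference with the standard Keras API. A minimal sketch, assuming the training generator rescaled pixels by 1/255 and the 224x224 input size set above (the image path is a placeholder):

import numpy as np
from keras.models import load_model
from keras.preprocessing import image

# reload the model written by save_model() above
model = load_model('model.h5')

# classify a single image
img = image.load_img('PATH/TO/IMAGE.jpg', target_size=(224, 224))
x = np.expand_dims(image.img_to_array(img), axis=0) / 255.0
print(model.predict(x).argmax(axis=-1))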
Example #6
def grid_search():
    logging = True
    log_filename = 'log_unfrozen_layers_grid_1epoch.csv'

    train_dir = '/data/g1753002_ocado/manhattan_project/training_data/split_ten_set_model_official_SUN_back_2018-04-07_13_19_16/train'
    validation_dir = '/data/g1753002_ocado/manhattan_project/training_data/split_ten_set_model_official_SUN_back_2018-04-07_13_19_16/validation'
    test_dir = '/data/g1753002_ocado/manhattan_project/test_data/extended_test_set_ambient'

    learning_rate_grid = np.logspace(-2, 0, 6)  # originally started at 1e-5
    unfrozen_layers_grid = np.linspace(0, 311, 10)
    dropout_grid = [0, 0.2, 0.5]
    layer_grid = [1, 2]
    batch_size_grid = [16, 32, 64]

    # sweep the learning-rate grid (the other grids above are not used in this loop)
    for lr in learning_rate_grid:

        # set parameters
        input_dim = 224
        fine_tune = False
        add_salt_pepper_noise = False  # if True, it adds SP noise
        augmentation_mode = 0  # 0 = no augmentation, 1 = rotation only, 2 = rotation & zoom
        epochs = 18
        unfrozen_layers = 311

        learning_rate = lr  # 0.0001
        dense_layers = 1
        batch_size = 64
        dropout = 0

        # initialize & train model
        model = retrain.KerasInception(input_dim=input_dim,
                                       batch_size=batch_size,
                                       dense_layers=dense_layers,
                                       dropout=dropout,
                                       lr=learning_rate)

        model.train(
            train_dir=train_dir,
            validation_dir=validation_dir,
            fine_tune=fine_tune,
            epochs=epochs,
            salt_pepper=add_salt_pepper_noise,
            augmentation_params=get_augmentation_params(augmentation_mode),
            save_model=True,
            unfrozen_layers=unfrozen_layers)

        # get accuracy score
        test_loss, test_acc = model.evaluate(test_dir=test_dir)

        # store accuracy & model parameters
        if logging:
            print("logging now...")
            my_file = Path(log_filename)

            # write header if this is the first run
            if not my_file.is_file():
                print("writing head")
                with open(log_filename, "w") as log:
                    log.write(
                        "datetime,epochs,learning_rate,batch_size,unfrozen_layers,input_dim,dense_layers,dropout,test_loss,test_acc\n"
                    )

            # append one CSV row with this run's parameters and results
            with open(log_filename, "a") as log:
                row = [
                    datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
                    epochs, learning_rate, batch_size, unfrozen_layers,
                    input_dim, dense_layers, dropout, test_loss, test_acc,
                ]
                log.write(','.join(str(v) for v in row) + '\n')
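
A quick way to inspect the log written above and pick the best run; a sketch assuming pandas is available (column names match the header written by grid_search):

import pandas as pd

df = pd.read_csv('log_unfrozen_layers_grid_1epoch.csv')
best = df.loc[df['test_acc'].idxmax()]
print(best[['learning_rate', 'test_loss', 'test_acc']])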
Example #7
def evaluate_cnn(learning_rate, dense_layers, batch_size, dropout, dense_dim):
    """
    script which takes a number of parameters for neural network training and
    returns the best resulting validation accuracy from these parameters
    results are also logged to log_filename defined below
    """

    # the Bayesian optimizer proposes floats; round and rescale them to valid ints
    dense_layers = int(dense_layers + 0.5)
    dense_dim = int(dense_dim + 0.5) * 512
    batch_size = int(batch_size + 0.5) * 16

    logging = True
    log_filename = 'log_bo_cnn_' + launch_datetime + '.csv'

    # load train images from one zip file
    unzipped_dir = retrain.unzip_and_return_path_to_folder(path_of_zip)
    train_dir = unzipped_dir + '/images'

    # get path for classes.txt
    main_dir, filename = os.path.split(path_of_zip)

    # set parameters
    fine_tune = False  # if True, some InceptionV3 layers are trained for 5 epochs at the end of training
    add_salt_pepper_noise = False  # if True, salt-and-pepper noise is added
    augmentation_mode = 0  # 0 = no augmentation, 1 = rotation only, 2 = rotation & zoom
    epochs = 600 // 4  # integer division so fit() receives an int epoch count
    input_dim = 224

    # initialize & train model
    model = retrain.KerasInception(input_dim=input_dim,
                                   batch_size=batch_size,
                                   dense_layers=dense_layers,
                                   dropout=dropout,
                                   lr=learning_rate,
                                   dense_dim=dense_dim)

    history = model.train(
        train_dir=train_dir,
        validation_dir=validation_dir,
        fine_tune=fine_tune,
        epochs=epochs,
        salt_pepper=add_salt_pepper_noise,
        augmentation_params=retrain.get_augmentation_params(augmentation_mode),
        classes_txt_dir=main_dir,
        save_model=True)

    # get accuracy score
    # score = model.evaluate(test_dir=test_dir)
    # test_accuracy = score[1]
    test_accuracy = 0

    # store accuracy & model parameters
    if logging:
        print("logging now...")
        my_file = Path(log_filename)

        # write header if this is the first run
        if not my_file.is_file():
            print("writing head")
            with open(log_filename, "w") as log:
                log.write("datetime,epochs,learning_rate,batch_size,input_dim,\
                dense_layers,dropout,dense_dim,best_validation_accuracy,\
                test_accuracy,file\n")

        # append one CSV row with this run's parameters and results
        with open(log_filename, "a") as log:
            row = [
                datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
                epochs, learning_rate, batch_size, input_dim,
                dense_layers, dropout, dense_dim,
                max(history.val_accs), test_accuracy, path_of_zip,
            ]
            log.write(','.join(str(v) for v in row) + '\n')

    return max(history.val_accs)  # return best validation accuracy
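
The float-to-int conversion at the top suggests evaluate_cnn is the objective function of a Bayesian optimizer. The source does not name the library, but a minimal driver using the bayes_opt package might look like this (the bounds are illustrative assumptions, not values from the source):

from bayes_opt import BayesianOptimization

# hypothetical search bounds; integer-like parameters are rescaled inside evaluate_cnn
optimizer = BayesianOptimization(
    f=evaluate_cnn,
    pbounds={
        'learning_rate': (1e-5, 1e-1),
        'dense_layers': (1, 3),
        'batch_size': (1, 4),   # multiplied by 16 inside evaluate_cnn
        'dropout': (0.0, 0.5),
        'dense_dim': (1, 4),    # multiplied by 512 inside evaluate_cnn
    },
)
optimizer.maximize(init_points=5, n_iter=25)
print(optimizer.max)  # best validation accuracy and the parameters that produced it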