Example #1
import os

import h5py as h5
import numpy as np
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Dropout, Flatten, Input
from keras.models import Model
from keras.optimizers import RMSprop, SGD
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2

import config  # project-local settings module
import util    # project-local helpers; later examples also assume data, models, utils


def train_top_layers(model, X_train, Y_train, X_test, Y_test, datagen):
    print("Compiling model...")
    # rms = RMSprop(lr=5e-4, rho=0.9, epsilon=1e-08, decay=0.01)
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # train_datagen = ImageDataGenerator(rotation_range=30., shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
    # train_datagen = ImageDataGenerator(featurewise_center=False,
    #                                    samplewise_center=False,
    #                                    featurewise_std_normalization=False,
    #                                    samplewise_std_normalization=False,
    #                                    zca_whitening=False,
    #                                    rotation_range=0,
    #                                    width_shift_range=0.125,
    #                                    height_shift_range=0.125,
    #                                    horizontal_flip=True,
    #                                    vertical_flip=False,
    #                                    fill_mode='nearest')
    # train_gen, val_gen = _get_data_generators(train_datagen)
    callbacks = _get_callbacks(config.get_top_model_weights_path())
    test_datagen = ImageDataGenerator()
    # Keras 1.x generator training; train_top_layers_nb_epoch is presumably a
    # module-level constant in the source project.
    model.fit_generator(datagen.flow(X_train, Y_train, shuffle=True),
                        samples_per_epoch=X_train.shape[0],
                        nb_epoch=train_top_layers_nb_epoch,
                        validation_data=test_datagen.flow(X_test, Y_test),
                        nb_val_samples=X_test.shape[0],
                        callbacks=callbacks)
    return model
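
A minimal sketch of calling train_top_layers; the array shapes, label encoding, and augmentation settings below are illustrative assumptions, not part of the original example.

# Hypothetical usage sketch (Keras 1.x, TensorFlow dim ordering assumed).
X_train = np.random.rand(128, 224, 224, 3).astype('float32')  # placeholder images
Y_train = np.eye(10)[np.random.randint(0, 10, 128)]           # one-hot labels
X_test = np.random.rand(32, 224, 224, 3).astype('float32')
Y_test = np.eye(10)[np.random.randint(0, 10, 32)]

datagen = ImageDataGenerator(width_shift_range=0.125, horizontal_flip=True)
# model = train_top_layers(model, X_train, Y_train, X_test, Y_test, datagen)
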
def train_top_model(class_weight=None):
    train_data = np.load(open(config.bf_train_path, 'rb'))
    validation_data = np.load(open(config.bf_valid_path, 'rb'))

    # One integer label per image, in the same class order used when the
    # bottleneck features were extracted.
    train_labels = []
    validation_labels = []
    for k, class_name in enumerate(config.classes):
        train_labels += [k] * util.get_dir_imgs_number(
            os.path.join(config.train_dir, class_name))
        validation_labels += [k] * util.get_dir_imgs_number(
            os.path.join(config.validation_dir, class_name))
    train_labels = np.array(train_labels)
    validation_labels = np.array(validation_labels)

    model = get_top_model_for_VGG16(shape=train_data.shape[1:],
                                    nb_class=len(config.classes),
                                    W_regularizer=True)
    rms = RMSprop(lr=5e-4, rho=0.9, epsilon=1e-08, decay=0.01)
    model.compile(optimizer=rms, loss='sparse_categorical_crossentropy', metrics=['accuracy'])

    early_stopping = EarlyStopping(verbose=1, patience=20, monitor='val_loss')
    model_checkpoint = ModelCheckpoint(
        config.get_top_model_weights_path(),
        save_best_only=True,
        save_weights_only=True,
        monitor='val_loss')
    callbacks_list = [early_stopping, model_checkpoint]

    history = model.fit(
        train_data,
        train_labels,
        nb_epoch=top_model_nb_epoch,
        validation_data=(validation_data, validation_labels),
        callbacks=callbacks_list,
        class_weight=class_weight)

    util.save_history(history=history, prefix='bottleneck')
def get_top_model_for_VGG16(nb_class=None,
                            shape=None,
                            W_regularizer=None,
                            weights_file_path=False,
                            input=None,
                            output=None):
    # Either build a standalone head on a fresh Input tensor, or graft the
    # head onto an existing graph; in the latter case both `input` and
    # `output` must be supplied.
    if not output:
        inputs = Input(shape=shape)
        x = Flatten(name='flatten')(inputs)
    else:
        x = Flatten(name='flatten', input_shape=shape)(output)

    #############################
    weights_file = None
    if weights_file_path:
        # weights_file_path acts as a flag here; the actual path is taken
        # from config.
        weights_file = h5.File(config.get_top_model_weights_path(), mode='r')

    #############################
    if W_regularizer:
        # A fresh l2 instance is created for each Dense layer rather than
        # sharing one regularizer object across layers.
        W_regularizer = l2(1e-2)

    weights_1 = get_layer_weights(weights_file, 'fc1')
    x = Dense(4096,
              activation='relu',
              W_regularizer=W_regularizer,
              weights=weights_1,
              name='fc1')(x)
    #############################

    x = Dropout(0.6)(x)

    #############################
    if W_regularizer:
        W_regularizer = l2(1e-2)

    weights_2 = get_layer_weights(weights_file, 'fc2')
    x = Dense(4096,
              activation='relu',
              W_regularizer=W_regularizer,
              weights=weights_2,
              name='fc2')(x)
    #############################

    x = Dropout(0.6)(x)

    #############################
    weights_3 = get_layer_weights(weights_file, 'predictions')
    predictions = Dense(nb_class,
                        activation='softmax',
                        weights=weights_3,
                        name='predictions')(x)
    #############################

    if weights_file:
        weights_file.close()

    model = Model(input=input or inputs, output=predictions)
    return model
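
A quick, hypothetical call pattern for get_top_model_for_VGG16; the feature shape assumes VGG16's final pooling output for 224x224 inputs with TensorFlow dim ordering, and the class count is a placeholder.

# Build the classifier head over precomputed bottleneck features.
top_model = get_top_model_for_VGG16(shape=(7, 7, 512), nb_class=10,
                                    W_regularizer=True)
top_model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
                  metrics=['accuracy'])
top_model.summary()
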
def _cleanup():
    # remove unnecessary weights files
    files_to_remove = [
        config.bf_train_path, config.bf_valid_path,
        config.get_top_model_weights_path()
    ]
    for f in files_to_remove:
        os.remove(f)

    # move final model weights for further use
    os.rename(config.get_fine_tuned_weights_path(checkpoint=True),
              config.get_fine_tuned_weights_path())
Example #5
# Method of models.TransferModel (see transfer_learning below): stacks the
# trained top model on a headless pretrained base.
def load_weights_from_top_model(self, top_model_weights_path=None):
    if top_model_weights_path is None:
        top_model_weights_path = config.get_top_model_weights_path(
            self.base_model)
    pretrained_model = utils.get_pretrained_model(
        self.base_model, include_top=False, input_shape=self.input_shape)
    top_model = TopModel(base_model=self.base_model,
                         fc_layer_size=self.fc_layer_size,
                         classes=self.classes)
    top_model.load_weights(top_model_weights_path)
    model = Model(inputs=pretrained_model.input,
                  outputs=top_model.model(pretrained_model.output))
    self.model = model
Example #6
# Constructor of models.TopModel (instantiated in transfer_learning below).
def __init__(self, base_model=None, fc_layer_size=2048, classes=None):
    self.fc_layer_size = fc_layer_size
    if not base_model:
        base_model = config.model
    assert utils.is_keras_pretrained_model(base_model)
    self.base_model = base_model
    if classes is None:
        classes = config.classes
    self.classes = np.array(classes)
    self.output_dim = len(classes)
    self.image_size = config.target_size_dict[base_model]
    self.model_weights_path = config.get_top_model_weights_path(base_model)
    self.model_path = config.get_top_model_path(base_model)
    self.preprocess_fun = data.preprocess_input_wrapper(self.base_model)
    self._create()
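
As a rough illustration, constructing the TopModel directly might look like the following; the model name and class list are placeholders and must satisfy utils.is_keras_pretrained_model.

# Hypothetical usage sketch for TopModel.
top = TopModel(base_model='resnet50', fc_layer_size=2048,
               classes=['cat', 'dog'])
# top.fit(x_train, y_train, validation_data=(x_valid, y_valid), lr=1e-3)
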
def tune(lr=0.0001, class_weight=None):
    # load_model here is the project-local builder (note the nb_class /
    # weights_path arguments), not keras.models.load_model.
    model = load_model(nb_class=len(config.classes),
                       weights_path=config.get_top_model_weights_path())

    model.compile(
        loss='categorical_crossentropy',
        optimizer=SGD(lr=lr, decay=1e-6, momentum=0.9, nesterov=True),
        metrics=['accuracy'])

    # prepare data augmentation configuration
    train_datagen = ImageDataGenerator(
        rotation_range=30.,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)
    util.apply_mean(train_datagen)

    train_generator = train_datagen.flow_from_directory(
        config.train_dir,
        target_size=config.img_size,
        classes=config.classes)

    test_datagen = ImageDataGenerator()
    util.apply_mean(test_datagen)

    validation_generator = test_datagen.flow_from_directory(
        config.validation_dir,
        target_size=config.img_size,
        classes=config.classes)

    early_stopping = EarlyStopping(verbose=1, patience=30, monitor='val_loss')
    model_checkpoint = ModelCheckpoint(config.get_fine_tuned_weights_path(checkpoint=True),
                                       save_best_only=True,
                                       save_weights_only=True,
                                       monitor='val_loss')
    history = model.fit_generator(
        train_generator,
        samples_per_epoch=config.nb_train_samples,
        nb_epoch=fine_tuning_nb_epoch,
        validation_data=validation_generator,
        nb_val_samples=config.nb_validation_samples,
        callbacks=[early_stopping, model_checkpoint],
        class_weight=class_weight)

    util.save_history(history=history, prefix='fine-tuning')
    util.save_classes(config.classes)

    _cleanup()
def transfer_learning(base_model=None,
                      fc_layer_size=2048,
                      freeze_layers_num=None,
                      classes=None,
                      epochs_top_model=250,
                      epochs_transfer_model=250,
                      lr_top_model=1e-3,
                      lr_transfer_model=1e-4,
                      project_path=None):
    if project_path is None:
        project_path = config.abspath
    config.trained_dir = os.path.join(project_path, 'trained')
    config.precomputed_dir = os.path.join(project_path, 'precomputed')
    utils.create_dir(config.trained_dir)
    utils.create_dir(config.precomputed_dir)
    # Resolve the base model before deriving any of its paths.
    if base_model is None:
        base_model = config.model
    assert utils.is_keras_pretrained_model(base_model)
    config.get_top_model_weights_path(base_model)
    config.get_transfer_model_weights_path(base_model)
    config.get_top_model_path(base_model)
    config.get_transfer_model_path(base_model)
    if classes is None:
        classes = config.classes
    print('Started extracting bottleneck features for train data')
    x_train = data.get_bottleneck_features_from_path_train(
        model=base_model,
        classes=classes,
        save=False,
        verbose=True)
    print('Finished extracting bottleneck features for train data')
    y_train = data.get_y_from_path_train(
        classes=classes,
        shuffle=False,
        save=False,
        verbose=True)
    print('Started extracting bottleneck features for valid data')
    x_valid = data.get_bottleneck_features_from_path_valid(
        model=base_model,
        classes=classes,
        save=False,
        verbose=True)
    print('Finished extracting bottleneck features for valid data')
    y_valid = data.get_y_from_path_valid(
        classes=classes,
        shuffle=False,
        save=False,
        verbose=True)
    top_model = models.TopModel(
        base_model=base_model,
        fc_layer_size=fc_layer_size)
    top_model.fit(
        x_train,
        y_train,
        epochs=epochs_top_model,
        validation_data=(x_valid, y_valid),
        lr=lr_top_model)
    transfer_model = models.TransferModel(
        base_model=base_model,
        fc_layer_size=fc_layer_size)
    transfer_model.load_weights_from_top_model()
    transfer_model.fit_generator(
        epochs=epochs_transfer_model,
        lr=lr_transfer_model)
    return transfer_model
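
Putting it together, a hedged end-to-end call might look like this; every argument value below is a placeholder, and the directory layout under project_path is assumed to match what the project's config and data modules expect.

# Hypothetical driver call for the full transfer-learning pipeline.
transfer_model = transfer_learning(base_model='resnet50',
                                   fc_layer_size=2048,
                                   classes=['cat', 'dog'],
                                   epochs_top_model=50,
                                   epochs_transfer_model=50,
                                   project_path='/path/to/project')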