Example #1
import os

from keras import models, layers, optimizers
from keras.applications import NASNetLarge
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, TensorBoard


def level1():
    # Building the model using the pretrained model
    conv_base1 = NASNetLarge(weights='imagenet', include_top=False, input_shape=(img_height, img_width, 3))
    print("\n### LEVEL1 ###\npretrained network:")
    conv_base1.summary()
    model = models.Sequential()
    model.add(conv_base1)
    model.add(layers.GlobalAveragePooling2D())
    model.add(layers.Dense(fcLayer1, activation='relu'))
    model.add(layers.Dropout(dropout))
    model.add(layers.Dense(classes, activation='softmax'))

    # freezing the base network
    print("trainable layers bevor freezing:", int(len(model.trainable_weights)/2)) # weights = weights + bias = 2 pro layer
    conv_base1.trainable = False
    print("trainable layers after freezing:", int(len(model.trainable_weights)/2))
    print("\npretrained network + densely connected classifier")
    model.summary()

    # training the added layers only
    model.compile(loss='categorical_crossentropy', optimizer=optimizers.RMSprop(lr=learning_rate, decay=lr_decay), metrics=['acc'])

    callbacks_list_L1 = [ModelCheckpoint(filepath=weights_path, save_weights_only=True, monitor='val_acc', verbose=1, save_best_only=True),
                         ReduceLROnPlateau(monitor='val_acc', factor=factorL1, patience=patienceL1, verbose=1),
                         TensorBoard(log_dir=os.path.join(TensorBoardLogDir, 'level1'))]

    print("\n### Level1 Training ... ")
    # training the model
    history = model.fit_generator(
        train_generator,
        steps_per_epoch=(nbrTrainImages * classes) // (batch * 5),
        epochs=epochsL1,
        callbacks=callbacks_list_L1,
        validation_data=test_generator,
        validation_steps=nbrTestImages,
        verbose=verbose_train)

    history_val1 = [history.history]  # saving all results of the final test
    plot(history_val1, "LEVEL1:", epochsL1)
    print("\n### LEVEL1 Training finished successfully ###")

    print("\nLoading trained weights from " + weights_path + " ...")
    model.load_weights(weights_path)
    model.compile(loss='categorical_crossentropy', optimizer=optimizers.RMSprop(lr=learning_rate), metrics=['acc'])
    print("\n### Saving Level1 Model to ", model_path+'l1.h5', " ... ")
    model.save(model_path+'l1.h5')
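
level1() reads its hyperparameters, file paths, data generators, and a plot() helper from module level; the snippet does not define them. A minimal sketch of one plausible configuration follows. Every name, value, and path in it is an illustrative assumption, not taken from the original project.

# Assumed configuration for level1(); all values are illustrative placeholders.
from keras.preprocessing.image import ImageDataGenerator

img_height, img_width = 331, 331            # NASNetLarge's native input size
fcLayer1 = 1024                             # units in the added Dense layer
dropout = 0.5
classes = 10
learning_rate, lr_decay = 2e-5, 1e-6
batch = 16
epochsL1 = 30
factorL1, patienceL1 = 0.5, 3
verbose_train = 1
nbrTrainImages, nbrTestImages = 1000, 200
weights_path = 'level1_weights.h5'
model_path = 'model_'
TensorBoardLogDir = 'logs'

datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = datagen.flow_from_directory(
    'data/train', target_size=(img_height, img_width),
    batch_size=batch, class_mode='categorical')
test_generator = datagen.flow_from_directory(
    'data/test', target_size=(img_height, img_width),
    batch_size=batch, class_mode='categorical')

def plot(histories, title, epochs):
    # Stub for the project-specific plotting helper; the signature is
    # inferred from the call site in level1().
    pass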
Example #2
import os

import keras
import matplotlib.pyplot as plt
from keras import optimizers
from keras.applications import NASNetLarge, NASNetMobile
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from keras.layers import Dense, Dropout
from keras.models import Model, Sequential
from keras.preprocessing.image import ImageDataGenerator


def Nas_Net(trainable=None, net="NASNetMobile"):

    # Prepare the dataset as Keras data generators

    train_datagen = ImageDataGenerator(rotation_range=rotation,
                                       width_shift_range=width_shift,
                                       height_shift_range=height_shift,
                                       rescale=scale,
                                       shear_range=shear,
                                       zoom_range=zoom,
                                       horizontal_flip=horizontal,
                                       fill_mode=fill,
                                       validation_split=validation)
    test_datagen = ImageDataGenerator(rescale=scale)
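    # Note: test_datagen is created but never used below; the validation
    # generator draws its images from train_datagen via validation_split.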

    train_generator = train_datagen.flow_from_directory(
        path,
        target_size=target,
        batch_size=batch,
        class_mode='categorical',
        subset='training',
    )
    validation_generator = train_datagen.flow_from_directory(
        path,
        target_size=target,
        batch_size=batch,
        class_mode='categorical',
        subset='validation')

    models_list = ['NASNetLarge', 'NASNetMobile']

    # Loading the NasNet Model

    if net == "NASNetLarge":
        nasnet = NASNetLarge(include_top=False,
                             weights='imagenet',
                             input_shape=input_sh,
                             pooling=pooling_model)
    if net == "NASNetMobile":
        nasnet = NASNetMobile(include_top=False,
                              weights='imagenet',
                              input_shape=input_sh,
                              pooling=pooling_model)
    if net not in models_list:
        raise ValueError('Please provide the raise model ')
    output = nasnet.layers[-1].output
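    # With pooling=None the base network ends in a 4D feature map, so flatten it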
    if pooling_model is None:
        output = keras.layers.Flatten()(output)
    nasnet = Model(inputs=nasnet.input, outputs=output)
    nasnet.summary()  # summary() prints directly; wrapping it in print() would also emit "None"
    print('\n\n\n')
    # No fine-tuning: NASNet stays frozen and only the new classifier head is trained
    if trainable is None:
        model = Sequential()
        model.add(nasnet)
        model.add(Dense(hidden, activation='relu')))  # input_dim dropped: the input shape is fixed by the nasnet block above
        model.add(Dropout(dropout_num))
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num))
        if classes == 1:
            model.add(Dense(classes, activation='sigmoid', name='Output'))
        else:
            model.add(Dense(classes, activation='softmax', name='Output'))

        for layer in nasnet.layers:
            layer.trainable = False
        print("The model summary of Nasnet  -->\n\n\n"
              )  # In this the Nasnet layers are not trainable

        for i, layer in enumerate(nasnet.layers):
            print(i, layer.name, layer.trainable)
        model.compile(
            loss=loss_param,  # Change according to data
            optimizer=optimizers.RMSprop(),
            metrics=['accuracy'])
        print("The summary of final Model \n\n\n")
        model.summary()
        print('\n\n\n')

        fit_history = model.fit_generator(
            train_generator,
            steps_per_epoch=len(train_generator.filenames) // batch,
            epochs=epoch,
            shuffle=True,
            validation_data=validation_generator,
            validation_steps=len(validation_generator.filenames) // batch,
            class_weight=n,
            callbacks=[
                EarlyStopping(patience=patience_param,
                              restore_best_weights=True),
                ReduceLROnPlateau(patience=patience_param)
            ])
        model.save(os.path.join(output_path, "model.h5"))  # avoids the os.chdir() side effect
        print(fit_history.history.keys())
        plt.figure(1, figsize=(15, 8))

        plt.subplot(221)
        plt.plot(fit_history.history['accuracy'])
        plt.plot(fit_history.history['val_accuracy'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])

        plt.subplot(222)
        plt.plot(fit_history.history['loss'])
        plt.plot(fit_history.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])

        plt.show()

    if trainable is not None:
        # Make last block of the conv_base trainable:
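        # `trainable` is an index into nasnet.layers; a negative value such as
        # -20 leaves all but the last 20 layers frozen.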

        for layer in nasnet.layers[:trainable]:
            layer.trainable = False
        for layer in nasnet.layers[trainable:]:
            layer.trainable = True

        print('Last block of the conv_base is now trainable')

        for i, layer in enumerate(nasnet.layers):
            print(i, layer.name, layer.trainable)

        model = Sequential()
        model.add(nasnet)
        model.add(Dense(hidden, activation='relu')))  # input_dim dropped: the input shape is fixed by the nasnet block above
        model.add(Dropout(dropout_num))
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num))
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num))
        if classes == 1:
            model.add(Dense(classes, activation='sigmoid', name='Output'))
        else:
            model.add(Dense(classes, activation='softmax', name='Output'))

        print("The model summary of Nasnet -->\n\n\n")  # layers before index `trainable` stay frozen
        model.compile(
            loss=loss_param,  # Change according to data
            optimizer=optimizers.RMSprop(),
            metrics=['accuracy'])
        print("The summary of final Model \n\n\n")
        model.summary()
        print('\n\n\n')

        fit_history = model.fit_generator(
            train_generator,
            steps_per_epoch=len(train_generator.filenames) // batch,
            epochs=epoch,
            shuffle=True,
            validation_data=validation_generator,
            validation_steps=len(validation_generator.filenames) // batch,
            class_weight=n,
            callbacks=[
                EarlyStopping(patience=patience_param,
                              restore_best_weights=True),
                ReduceLROnPlateau(patience=patience_param)
            ])
        model.save(os.path.join(output_path, "model.h5"))  # avoids the os.chdir() side effect
        print(fit_history.history.keys())
        plt.figure(1, figsize=(15, 8))

        plt.subplot(221)
        plt.plot(fit_history.history['accuracy'])
        plt.plot(fit_history.history['val_accuracy'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])

        plt.subplot(222)
        plt.plot(fit_history.history['loss'])
        plt.plot(fit_history.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])

        plt.show()
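
Nas_Net() likewise depends on many module-level settings that the snippet does not define. A hedged sketch of one possible configuration and two calls (feature extraction vs. fine-tuning) follows; all names and values are assumptions for illustration, not taken from the original project.

# Assumed configuration for Nas_Net(); all values are illustrative placeholders.
rotation, width_shift, height_shift = 20, 0.2, 0.2
scale = 1. / 255
shear, zoom = 0.2, 0.2
horizontal = True
fill = 'nearest'
validation = 0.2
path = 'data/images'
target = (224, 224)
batch = 32
input_sh = (224, 224, 3)                  # NASNetMobile's native input size
pooling_model = 'avg'
hidden = 512
dropout_num = 0.5
classes = 10
loss_param = 'categorical_crossentropy'
epoch = 30
n = None                                  # class_weight, e.g. {0: 1.0, 1: 2.5}
patience_param = 5
output_path = '.'

# Feature extraction: NASNet stays frozen, only the new head trains.
Nas_Net(net="NASNetMobile")

# Fine-tuning: unfreeze every layer from index -20 onward.
Nas_Net(trainable=-20, net="NASNetMobile")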