Example No. 1
def func2(shape):
	from keras.applications import ResNet101
	BS = 16
	conv_base = ResNet101(weights = 'imagenet',
                 	include_top = False,
                 	input_shape = (shape[0],shape[1],3))
	return BS,conv_base
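# Usage sketch (not part of the original example; the 224x224 input size, the
# 2-class head and the hyperparameters are assumptions): attach a small
# classifier on top of the frozen conv_base returned by func2.
from keras.layers import Flatten, Dense
from keras.models import Sequential

BS, conv_base = func2((224, 224))
conv_base.trainable = False
clf = Sequential([conv_base,
                  Flatten(),
                  Dense(256, activation='relu'),
                  Dense(2, activation='softmax')])
clf.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])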
Example No. 2
def get_resnet():
    base_resnet_model = ResNet101(input_shape=(128, 128, 3),
                                  include_top=False,
                                  weights=None)
    pooling_layer = GlobalAveragePooling2D()(base_resnet_model.output)
    dropout_layer1 = Dropout(0.5)(pooling_layer)
    dense_layer1 = Dense(512)(dropout_layer1)
    dropout_layer2 = Dropout(0.5)(dense_layer1)
    dense_layer2 = Dense(15, activation='sigmoid')(dropout_layer2)
    model = Model(inputs=base_resnet_model.inputs, outputs=dense_layer2)
    return model
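# Usage sketch (not in the original snippet): the 15-unit sigmoid output above
# suggests a multi-label setup, so binary_crossentropy is an assumed but natural
# loss choice when compiling the returned model.
model = get_resnet()
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
model.summary()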
Example No. 3
def build_resnet_model2(classes, version=50, input_shape=(224, 224, 3)):
    if version == 50:
        model = ResNet50(include_top=False,
                         weights='imagenet',
                         input_shape=input_shape)
    if version == 101:
        model = ResNet101(include_top=False,
                          weights='imagenet',
                          input_shape=input_shape)
    if version == 152:
        model = ResNet152(include_top=False,
                          weights='imagenet',
                          input_shape=input_shape)
    model.trainable = False
    model_input = model.input
    X = model.output
    #conv_model = Model(inputs=model.input, outputs=transfer_layer.output)
    X = Flatten()(X)
    X = Dense(1024, activation='relu')(X)
    X = Dropout(0.25)(X)
    X = Dense(classes, activation='softmax')(X)
    new_model = Model(inputs=model_input, outputs=X)
    return input_shape, new_model
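# Usage sketch (the class count of 10 is an assumption): build the ResNet101
# variant and compile it; note that build_resnet_model2 returns the input shape
# together with the model.
shape, resnet101_clf = build_resnet_model2(classes=10, version=101)
resnet101_clf.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])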
Example No. 4
def build_resnet_model(classes, version=50, input_shape=(224, 224, 3)):
    if version == 50:
        model = ResNet50(include_top=False,
                         weights='imagenet',
                         input_shape=input_shape)
    if version == 101:
        model = ResNet101(include_top=False,
                          weights='imagenet',
                          input_shape=input_shape)
    if version == 152:
        model = ResNet152(include_top=False,
                          weights='imagenet',
                          input_shape=input_shape)
    print('Using ResNet' + str(version))
    transfer_layer = model.layers[-1]
    #conv_model = Model(inputs=model.input, outputs=transfer_layer.output)
    new_model = Sequential()
    new_model.add(model)
    new_model.add(Flatten())
    new_model.add(Dense(1024, activation='relu'))
    new_model.add(Dropout(0.25))
    new_model.add(Dense(classes, activation='softmax'))
    model.trainable = False
    return input_shape, new_model
        0: 1.,
        1: 5., # weigh covid weights as 5x more than the others 
        2: 1.,
        3: 1.
    },
    callbacks=[checkpoint]
)

"""# V10 Finetune ResNet101"""

from keras.applications import ResNet101
from keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Input, Flatten,GlobalMaxPooling2D,BatchNormalization,InputLayer
from keras.optimizers import Adam
from keras import Sequential, Model

resnet = ResNet101(include_top=False,input_shape=(IMAGE_SHAPE,IMAGE_SHAPE,3))

x = resnet.layers[-1].output 
x = Flatten()(x)
x = Dense(512,activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(256,activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(4,activation='softmax')(x)

model = Model(inputs=resnet.inputs, outputs=x)
model.compile(optimizer=Adam(lr=3e-5),loss='categorical_crossentropy',metrics=['accuracy'],weighted_metrics=['accuracy'])

from keras.callbacks import ModelCheckpoint
checkpoint = ModelCheckpoint("model_checkpoints/v11/resnet101_finetune_weights_{epoch:02d}-{val_accuracy:.2f}.hdf5",save_best_only=True)
    print('ROC AUC: %f' % auc)
    # confusion matrix
    matrix = confusion_matrix(y_test_orig, yhat_classes)
    print(matrix)

if Train_ResNet101_Flipout:

    epochs = 300
    batch_size = 256

    #from tensorflow.keras.applications import ResNet50
    from tensorflow.keras.applications import ResNet50V2, ResNet101
    model2 = tf.keras.Sequential()

    #model2.add(ResNet50V2(include_top=False, pooling='avg', weights=None))
    model2.add(ResNet101(include_top=False, pooling='max', weights=None))
    #model2.add(ResNet50(include_top=False, pooling='avg', weights=None))
    #model2.add(tfp.layers.DenseFlipout(512))
    #model2.add(tfp.layers.DenseFlipout(256))
    #model2.add(tfp.layers.DenseFlipout(64))
    #model2.add(tfp.layers.DenseFlipout(32))
    model2.add(tfp.layers.DenseFlipout(1024))
    #model2.add(tf.keras.layers.DenseFlipout(512, activation='linear'))
    #model2.add(tfp.layers.DenseFlipout(512))
    #model2.add(tf.keras.layers.DenseFlipout(128, activation='linear'))
    model2.add(tfp.layers.DenseFlipout(64))
    model2.add(tfp.layers.DenseFlipout(32))
    model2.add(tfp.layers.DenseFlipout(num_features))
    '''
    def neg_log_likelihood(y_obs, y_pred, sigma=noise):
        dist = tfp.distributions.Normal(loc=y_pred, scale=sigma)
Example No. 7
)
X_test = np.load(
    '/home/pmcn/workspace/Test_Code/Resnet50/CIP_Generator/test_npy/X_test.npy'
)
Y_test = np.load(
    '/home/pmcn/workspace/Test_Code/Resnet50/CIP_Generator/test_npy/Y_test.npy'
)

print('X_train shape : ', X_train.shape)
print('Y_train shape : ', Y_train.shape)
print('X_test shape : ', X_test.shape)
print('Y_test shape : ', Y_test.shape)

model = ResNet101(include_top=False,
                  weights='imagenet',
                  input_tensor=None,
                  input_shape=(224, 224, 3),
                  pooling=None,
                  classes=9)
x = model.output
x = Flatten(name='flatten')(x)
x = Dropout(0.5)(x)
x = Dense(9, activation='softmax', name='softmax')(x)

model_final = Model(inputs=model.input, outputs=x)

model_final.compile(optimizer=tf.compat.v1.train.AdamOptimizer(0.0001),
                    loss='categorical_crossentropy',
                    metrics=['accuracy'])

model_final.summary()
traning = model_final.fit(X_train, Y_train, epochs=100, batch_size=32)
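# Evaluation sketch (not in the original snippet): score the trained model on
# the held-out arrays loaded above.
scores = model_final.evaluate(X_test, Y_test, batch_size=32)
print('Test loss     : ', scores[0])
print('Test accuracy : ', scores[1])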
Example No. 8
def Resnet_Net(trainable=None, net="ResNet50"):

    netold = ['ResNet50', 'ResNet101', 'ResNet152']
    # Preprocessing the dataset into keras feedable format
    if net not in netold:
        train_datagen = ImageDataGenerator(rotation_range=rotation,
                                           width_shift_range=width_shift,
                                           height_shift_range=height_shift,
                                           rescale=scale,
                                           shear_range=shear,
                                           zoom_range=zoom,
                                           horizontal_flip=horizontal,
                                           fill_mode=fill,
                                           validation_split=validation)
        test_datagen = ImageDataGenerator(rescale=scale, )
    if net in netold:
        train_datagen = ImageDataGenerator(
            dtype='float32',
            preprocessing_function=preprocess_input,
            validation_split=validation)
        test_datagen = ImageDataGenerator(
            dtype='float32', preprocessing_function=preprocess_input)

    train_generator = train_datagen.flow_from_directory(
        path,
        target_size=target,
        batch_size=batch,
        class_mode='categorical',
        subset='training',
    )
    validation_generator = train_datagen.flow_from_directory(
        path,
        target_size=target,
        batch_size=batch,
        class_mode='categorical',
        subset='validation')

    models_list = [
        'ResNet50', 'ResNet101', 'ResNet152', 'ResNet50V2', 'ResNet101V2',
        'ResNet152V2'
    ]

    # Loading the ResNet50 Model

    if net == "ResNet50":
        resnet = ResNet50(include_top=False,
                          weights='imagenet',
                          input_shape=input_sh,
                          pooling=pooling_model)
    if net == "ResNet101":
        resnet = ResNet101(include_top=False,
                           weights='imagenet',
                           input_shape=input_sh,
                           pooling=pooling_model)
    if net == "ResNet152":
        resnet = ResNet152(include_top=False,
                           weights='imagenet',
                           input_shape=input_sh,
                           pooling=pooling_model)
    if net == "ResNet50V2":
        resnet = ResNet50V2(include_top=False,
                            weights='imagenet',
                            input_shape=input_sh,
                            pooling=pooling_model)
    if net == "ResNet101V2":
        resnet = ResNet101V2(include_top=False,
                             weights='imagenet',
                             input_shape=input_sh,
                             pooling=pooling_model)
    if net == "ResNet152V2":
        resnet = ResNet152V2(include_top=False,
                             weights='imagenet',
                             input_shape=input_sh,
                             pooling=pooling_model)
    if net not in models_list:
        raise ValueError('Please provide a valid model name')
    output = resnet.layers[-1].output
    if pooling_model is None:
        output = keras.layers.Flatten()(output)
    resnet = Model(resnet.input, outputs=output)
    print(resnet.summary())
    print('\n\n\n')
    # If you chose not for fine tuning
    if trainable is None:
        model = Sequential()
        model.add(resnet)
        model.add(Dense(hidden, activation='relu', input_dim=input_sh))
        model.add(Dropout(dropout_num))
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num))
        if classes == 1:
            model.add(Dense(classes, activation='sigmoid', name='Output'))
        else:
            model.add(Dense(classes, activation='softmax', name='Output'))

        for layer in resnet.layers:
            layer.trainable = False
        print("The model summary of Resnet  -->\n\n\n"
              )  # In this the Resnet50 layers are not trainable

        for i, layer in enumerate(resnet.layers):
            print(i, layer.name, layer.trainable)
        model.compile(
            loss=loss_param,  # Change according to data
            optimizer=optimizers.RMSprop(),
            metrics=['accuracy'])
        print("The summary of final Model \n\n\n")
        print(model.summary())
        print('\n\n\n')

        fit_history = model.fit_generator(
            train_generator,
            steps_per_epoch=len(train_generator.filenames) // batch,
            epochs=epoch,
            shuffle=True,
            validation_data=validation_generator,
            validation_steps=len(validation_generator.filenames) // batch,
            class_weight=n,
            callbacks=[
                EarlyStopping(patience=patience_param,
                              restore_best_weights=True),
                ReduceLROnPlateau(patience=patience_param)
            ])
        os.chdir(output_path)
        model.save("model.h5")
        print(fit_history.history.keys())
        plt.figure(1, figsize=(15, 8))

        plt.subplot(221)
        plt.plot(fit_history.history['accuracy'])
        plt.plot(fit_history.history['val_accuracy'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])

        plt.subplot(222)
        plt.plot(fit_history.history['loss'])
        plt.plot(fit_history.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])

        plt.show()

    if trainable is not None:
        # Make last block of the conv_base trainable:

        for layer in resnet.layers[:trainable]:
            layer.trainable = False
        for layer in resnet.layers[trainable:]:
            layer.trainable = True

        print('Last block of the conv_base is now trainable')

        for i, layer in enumerate(resnet.layers):
            print(i, layer.name, layer.trainable)

        model = Sequential()
        model.add(resnet)
        model.add(Dense(hidden, activation='relu', input_dim=input_sh))
        model.add(Dropout(dropout_num))
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num))
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num))
        if classes == 1:
            model.add(Dense(classes, activation='sigmoid', name='Output'))
        else:
            model.add(Dense(classes, activation='softmax', name='Output'))

        for layer in resnet.layers:
            layer.trainable = False
        print("The model summary of Resnet -->\n\n\n"
              )  # In this the Resnet50 layers are not trainable
        model.compile(
            loss=loss_param,  # Change according to data
            optimizer=optimizers.RMSprop(),
            metrics=['accuracy'])
        print("The summary of final Model \n\n\n")
        print(model.summary())
        print('\n\n\n')

        fit_history = model.fit_generator(
            train_generator,
            steps_per_epoch=len(train_generator.filenames) // batch,
            epochs=epoch,
            shuffle=True,
            validation_data=validation_generator,
            validation_steps=len(validation_generator.filenames) // batch,
            class_weight=n,
            callbacks=[
                EarlyStopping(patience=patience_param,
                              restore_best_weights=True),
                ReduceLROnPlateau(patience=patience_param)
            ])
        os.chdir(output_path)
        model.save("model.h5")
        print(fit_history.history.keys())
        plt.figure(1, figsize=(15, 8))

        plt.subplot(221)
        plt.plot(fit_history.history['accuracy'])
        plt.plot(fit_history.history['val_accuracy'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])

        plt.subplot(222)
        plt.plot(fit_history.history['loss'])
        plt.plot(fit_history.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])

        plt.show()
Example No. 9
# y_train = np_utils.to_categorical(y_train)
# y_test = np_utils.to_categorical(y_test)

# Normalization / add more features / regularization

# Data preprocessing 2: normalization
x_train = x_train.reshape(50000, 32, 32, 3).astype('float32') / 255
x_test = x_test.reshape(10000, 32, 32, 3).astype('float32') / 255

# Model construction
from keras.regularizers import l1, l2, l1_l2

input_tensor = Input(shape=(32, 32, 3))
rn101 = ResNet101(include_top=False,
                  weights='imagenet',
                  input_tensor=input_tensor)

# include_top decides whether to keep the original model's final fully connected layers;
# with False, only the convolutional feature-extraction part of the original model is used,
# and you can add your own layers on top.
# Setting weights to 'imagenet' uses the weights learned on ImageNet; None uses random weights.

model = Sequential()
model.add(rn101)
'''
model.add(Conv2D(32, kernel_size=3, padding='same', activation='relu', input_shape=(32, 32, 3)))
# model.add(Dropout(0.2))
model.add(Conv2D(32, kernel_size=3, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=2, padding='same'))
model.add(Dropout(0.2))
Example No. 10
def convolutional(instruction=None,
                  read_mode=None,
                  preprocess=True,
                  data_path=None,
                  verbose=0,
                  new_folders=True,
                  image_column=None,
                  training_ratio=0.8,
                  fine_tune=False,
                  augmentation=True,
                  custom_arch=None,
                  pretrained=None,
                  epochs=10,
                  height=None,
                  width=None,
                  save_as_tfjs=None,
                  save_as_tflite=None,
                  generate_plots=True):
    '''
    Body of the convolutional function that is called in the neural network query
    when the data is presented as images.
    :param many parameters: used for preprocessing, tuning, plot generation, and parameterizing the convolutional neural network being trained.
    :return: dictionary that holds all the information for the finished model.
    '''

    # data_path = get_folder_dir()

    logger("Generating datasets for classes")

    LR = 0.001
    plots = {}
    if pretrained:
        if not height:
            height = 224
        if not width:
            width = 224
        if height != 224 or width != 224:
            raise ValueError(
                "For pretrained models, both 'height' and 'width' must be 224."
            )

    if preprocess:
        if custom_arch:
            raise ValueError(
                "If 'custom_arch' is not None, 'preprocess' must be set to false."
            )

        read_mode_info = set_distinguisher(data_path, read_mode)
        read_mode = read_mode_info["read_mode"]

        training_path = "/proc_training_set"
        testing_path = "/proc_testing_set"

        if read_mode == "setwise":
            processInfo = setwise_preprocessing(data_path, new_folders, height,
                                                width)
            if not new_folders:
                training_path = "/training_set"
                testing_path = "/testing_set"

        # if image dataset in form of csv
        elif read_mode == "csvwise":
            if training_ratio <= 0 or training_ratio >= 1:
                raise BaseException(f"Test ratio must be between 0 and 1.")
            processInfo = csv_preprocessing(read_mode_info["csv_path"],
                                            data_path, instruction,
                                            image_column, training_ratio,
                                            height, width)

        # if image dataset in form of one folder containing class folders
        elif read_mode == "classwise":
            if training_ratio <= 0 or training_ratio >= 1:
                raise BaseException(f"Test ratio must be between 0 and 1.")
            processInfo = classwise_preprocessing(data_path, training_ratio,
                                                  height, width)

    else:
        training_path = "/training_set"
        testing_path = "/testing_set"
        processInfo = already_processed(data_path)

    num_channels = 3
    color_mode = 'rgb'
    if processInfo["gray_scale"]:
        num_channels = 1
        color_mode = 'grayscale'

    input_shape = (processInfo["height"], processInfo["width"], num_channels)
    input_single = (processInfo["height"], processInfo["width"])
    num_classes = processInfo["num_categories"]
    loss_func = ""
    output_layer_activation = ""

    if num_classes > 2:
        loss_func = "categorical_crossentropy"
        output_layer_activation = "softmax"
    elif num_classes == 2:
        num_classes = 1
        loss_func = "binary_crossentropy"
        output_layer_activation = "sigmoid"

    logger("Creating convolutional neural network dynamically")

    # Convolutional Neural Network

    # Build model based on custom_arch configuration if given
    if custom_arch:
        with open(custom_arch, "r") as f:
            custom_arch_dict = json.load(f)
            custom_arch_json_string = json.dumps(custom_arch_dict)
            model = model_from_json(custom_arch_json_string)

    # Build an existing state-of-the-art model
    elif pretrained:

        arch_lower = pretrained.get('arch').lower()

        # If user specifies value of pretrained['weights'] as 'imagenet', weights pretrained on ImageNet will be used
        if 'weights' in pretrained and pretrained.get('weights') == 'imagenet':
            # Load ImageNet pretrained weights
            if arch_lower == "vggnet16":
                base_model = VGG16(include_top=False,
                                   weights='imagenet',
                                   input_shape=input_shape)
                x = Flatten()(base_model.output)
                x = Dense(4096)(x)
                x = Dropout(0.5)(x)
                x = Dense(4096)(x)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "vggnet19":
                base_model = VGG19(include_top=False,
                                   weights='imagenet',
                                   input_shape=input_shape)
                x = Flatten()(base_model.output)
                x = Dense(4096)(x)
                x = Dropout(0.5)(x)
                x = Dense(4096)(x)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "resnet50":
                base_model = ResNet50(include_top=False,
                                      weights='imagenet',
                                      input_shape=input_shape)
                x = GlobalAveragePooling2D()(base_model.output)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "resnet101":
                base_model = ResNet101(include_top=False,
                                       weights='imagenet',
                                       input_shape=input_shape)
                x = GlobalAveragePooling2D()(base_model.output)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "resnet152":
                base_model = ResNet152(include_top=False,
                                       weights='imagenet',
                                       input_shape=input_shape)
                x = GlobalAveragePooling2D()(base_model.output)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "mobilenet":
                base_model = MobileNet(include_top=False,
                                       weights='imagenet',
                                       input_shape=input_shape)
                x = fine_tuned_model(base_model)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "mobilenetv2":
                base_model = MobileNetV2(include_top=False,
                                         weights='imagenet',
                                         input_shape=input_shape)
                x = fine_tuned_model(base_model)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "densenet121":
                base_model = DenseNet121(include_top=False,
                                         weights='imagenet',
                                         input_shape=input_shape)
                x = fine_tuned_model(base_model)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "densenet169":
                base_model = DenseNet169(include_top=False,
                                         weights='imagenet',
                                         input_shape=input_shape)
                x = fine_tuned_model(base_model)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "densenet201":
                base_model = DenseNet201(include_top=False,
                                         weights='imagenet',
                                         input_shape=input_shape)
                x = fine_tuned_model(base_model)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            else:
                raise ModuleNotFoundError("arch \'" + pretrained.get('arch') +
                                          "\' not supported.")

        else:
            # Randomly initialized weights
            if arch_lower == "vggnet16":
                model = VGG16(include_top=True,
                              weights=None,
                              classes=num_classes,
                              classifier_activation=output_layer_activation)
            elif arch_lower == "vggnet19":
                model = VGG19(include_top=True,
                              weights=None,
                              classes=num_classes,
                              classifier_activation=output_layer_activation)
            elif arch_lower == "resnet50":
                model = ResNet50(include_top=True,
                                 weights=None,
                                 classes=num_classes)
            elif arch_lower == "resnet101":
                model = ResNet101(include_top=True,
                                  weights=None,
                                  classes=num_classes)
            elif arch_lower == "resnet152":
                model = ResNet152(include_top=True,
                                  weights=None,
                                  classes=num_classes)
            elif arch_lower == "mobilenet":
                model = MobileNet(include_top=True,
                                  weights=None,
                                  classes=num_classes)
            elif arch_lower == "mobilenetv2":
                model = MobileNetV2(include_top=True,
                                    weights=None,
                                    classes=num_classes)
            elif arch_lower == "densenet121":
                model = DenseNet121(include_top=True,
                                    weights=None,
                                    classes=num_classes)
            elif arch_lower == "densenet169":
                model = DenseNet169(include_top=True,
                                    weights=None,
                                    classes=num_classes)
            elif arch_lower == "densenet201":
                model = DenseNet201(include_top=True,
                                    weights=None,
                                    classes=num_classes)
            else:
                raise ModuleNotFoundError("arch \'" + pretrained.get('arch') +
                                          "\' not supported.")

    else:
        model = Sequential()
        # model.add(
        #     Conv2D(
        #         64,
        #         kernel_size=3,
        #         activation="relu",
        #         input_shape=input_shape))
        # model.add(MaxPooling2D(pool_size=(2, 2)))
        # model.add(Conv2D(64, kernel_size=3, activation="relu"))
        # model.add(MaxPooling2D(pool_size=(2, 2)))
        # model.add(Flatten())
        # model.add(Dense(num_classes, activation="softmax"))
        # model.compile(
        #     optimizer="adam",
        #     loss=loss_func,
        #     metrics=['accuracy'])
        model.add(
            Conv2D(filters=64,
                   kernel_size=5,
                   activation="relu",
                   input_shape=input_shape))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Conv2D(filters=64, kernel_size=3, activation="relu"))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        model.add(Conv2D(filters=64, kernel_size=3, activation="relu"))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dense(units=256, activation="relu"))
        model.add(Dropout(0.25))
        model.add(Dense(units=num_classes, activation="softmax"))

    if pretrained and 'weights' in pretrained and pretrained.get(
            'weights') == 'imagenet':
        for layer in base_model.layers:
            layer.trainable = False

    opt = Adam(learning_rate=LR)

    model.compile(optimizer=opt, loss=loss_func, metrics=['accuracy'])

    logger("Located image data")

    if augmentation:
        train_data = ImageDataGenerator(rescale=1. / 255,
                                        shear_range=0.2,
                                        zoom_range=0.2,
                                        horizontal_flip=True)
        test_data = ImageDataGenerator(rescale=1. / 255)

        logger('Dataset augmented through zoom, shear, flip, and rescale')
    else:
        train_data = ImageDataGenerator()
        test_data = ImageDataGenerator()

    logger("->", "Optimal image size identified: {}".format(input_shape))
    X_train = train_data.flow_from_directory(
        data_path + training_path,
        target_size=input_single,
        color_mode=color_mode,
        batch_size=(16 if processInfo["train_size"] >= 16 else 1),
        class_mode=loss_func[:loss_func.find("_")])
    X_test = test_data.flow_from_directory(
        data_path + testing_path,
        target_size=input_single,
        color_mode=color_mode,
        batch_size=(16 if processInfo["test_size"] >= 16 else 1),
        class_mode=loss_func[:loss_func.find("_")])

    if epochs <= 0:
        raise BaseException("Number of epochs has to be greater than 0.")

    print("\n")
    logger('Training image model')

    # model.summary()

    history = model.fit_generator(
        X_train,
        steps_per_epoch=X_train.n // X_train.batch_size,
        validation_data=X_test,
        validation_steps=X_test.n // X_test.batch_size,
        epochs=epochs,
        verbose=verbose)

    if fine_tune:

        logger(
            '->', 'Training accuracy: {}'.format(
                history.history['accuracy'][len(history.history['accuracy']) -
                                            1]))
        logger(
            '->',
            'Validation accuracy: {}'.format(history.history['val_accuracy'][
                len(history.history['val_accuracy']) - 1]))

        for layer in base_model.layers:
            layer.trainable = True

        opt = Adam(learning_rate=LR / 10)

        model.compile(optimizer=opt, loss=loss_func, metrics=['accuracy'])

        print("\n\n")
        logger('Training fine tuned model')

        fine_tuning_epoch = epochs + 10
        history_fine = model.fit_generator(
            X_train,
            steps_per_epoch=X_train.n // X_train.batch_size,
            validation_data=X_test,
            validation_steps=X_test.n // X_test.batch_size,
            epochs=fine_tuning_epoch,
            initial_epoch=history.epoch[-1],
            verbose=verbose)
        #frozen model acc and loss history
        acc = history.history['accuracy']
        val_acc = history.history['val_accuracy']

        loss = history.history['loss']
        val_loss = history.history['val_loss']

        #fine tuned model acc and loss history
        acc += history_fine.history['accuracy']
        val_acc += history_fine.history['val_accuracy']

        loss += history_fine.history['loss']
        val_loss += history_fine.history['val_loss']

        if generate_plots:
            plots = generate_fine_tuned_classification_plots(
                acc, val_acc, loss, val_loss, epochs)

    models = []
    losses = []
    accuracies = []
    model_data = []

    model_data.append(model)
    models.append(history)

    losses.append(
        history.history["val_loss"][len(history.history["val_loss"]) - 1])
    accuracies.append(
        history.history['val_accuracy'][len(history.history['val_accuracy']) -
                                        1])

    # final_model = model_data[accuracies.index(max(accuracies))]
    # final_hist = models[accuracies.index(max(accuracies))]

    if generate_plots and not fine_tune:
        plots = generate_classification_plots(models[len(models) - 1])

    print("\n")
    logger(
        '->', 'Final training accuracy: {}'.format(
            history.history['accuracy'][len(history.history['accuracy']) - 1]))
    logger(
        '->',
        'Final validation accuracy: {}'.format(history.history['val_accuracy'][
            len(history.history['val_accuracy']) - 1]))
    # storing values the model dictionary

    number_of_examples = len(X_test.filenames)
    number_of_generator_calls = math.ceil(number_of_examples /
                                          (1.0 * X_test.batch_size))

    test_labels = []

    for i in range(0, int(number_of_generator_calls)):
        test_labels.extend(np.array(X_test[i][1]))

    predIdx = model.predict(X_test)

    if output_layer_activation == "sigmoid":
        real = [int(x) for x in test_labels]
        ans = []
        for i in range(len(predIdx)):
            ans.append(int(round(predIdx[i][0])))

    elif output_layer_activation == "softmax":
        real = []
        for ans in test_labels:
            real.append(ans.argmax())
        ans = []
        for r in predIdx:
            ans.append(r.argmax())

    else:
        print("NOT THE CASE")

    logger("Stored model under 'convolutional_NN' key")

    if save_as_tfjs:
        tfjs.converters.save_keras_model(model, "tfjsmodel")
        logger("Saved tfjs model under 'tfjsmodel' directory")

    if save_as_tflite:
        converter = tf.lite.TFLiteConverter.from_keras_model(model)
        tflite_model = converter.convert()
        open("model.tflite", "wb").write(tflite_model)
        logger("Saved tflite model as 'model.tflite' ")

    clearLog()

    K.clear_session()

    return {
        'id': generate_id(),
        'data_type': read_mode,
        'data_path': data_path,
        'data': {
            'train': X_train,
            'test': X_test
        },
        'shape': input_shape,
        'res': {
            'real': real,
            'ans': ans
        },
        'model': model,
        'plots': plots,
        'losses': {
            'training_loss': history.history['loss'],
            'val_loss': history.history['val_loss']
        },
        'accuracy': {
            'training_accuracy': history.history['accuracy'],
            'validation_accuracy': history.history['val_accuracy']
        },
        'num_classes': (2 if num_classes == 1 else num_classes),
        'data_sizes': {
            'train_size': processInfo['train_size'],
            'test_size': processInfo['test_size']
        }
    }
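# Usage sketch (the data path is a placeholder): the 'pretrained' dict is read
# via pretrained.get('arch') and pretrained.get('weights') above, so a
# ResNet101 transfer-learning run could be requested roughly like this.
result = convolutional(data_path="path/to/image_folder",
                       pretrained={'arch': 'resnet101', 'weights': 'imagenet'},
                       epochs=5,
                       fine_tune=True)
print(result['accuracy']['validation_accuracy'][-1])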
Example No. 11
from keras.applications import VGG16, VGG19, Xception, ResNet101, ResNet101V2, ResNet152, ResNet152V2
from keras.applications import ResNet50, ResNet50V2, InceptionV3, InceptionResNetV2
from keras.applications import MobileNet, MobileNetV2, DenseNet121, DenseNet169, DenseNet201
from keras.applications import NASNetLarge, NASNetMobile
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D, Flatten, BatchNormalization, Activation
from keras.optimizers import Adam

# vgg16 = VGG16() # (None, 224, 224, 3)
# model = VGG19()
model = Xception()
model = ResNet101()
model = ResNet101V2()
model = ResNet152()
model = ResNet152V2()
model = ResNet50()
model = ResNet50V2()
model = InceptionV3()
model = InceptionResNetV2()
model = MobileNet()
model = MobileNetV2()
model = DenseNet121()
model = DenseNet169()
model = DenseNet201()
model = NASNetLarge()
model = NASNetMobile()

# vgg16.summary()
'''
model= Sequential()
# model.add(vgg16)
Example No. 12
    index = [i for i in range(len(X_test))]
    random.shuffle(index)
    X_test = X_test[index]
    Y_test = Y_test[index]

    return X_train, Y_train, X_test, Y_test

X_train, Y_train, X_test, Y_test = DataSet()
print('X_train shape : ', X_train.shape)
print('Y_train shape : ', Y_train.shape)
print('X_test shape : ', X_test.shape)
print('Y_test shape : ', Y_test.shape)

model = ResNet101(include_top=True,
                  weights=None,
                  input_tensor=None,
                  input_shape=None,
                  pooling=None,
                  classes=4)

model.compile(optimizer=tf.train.AdamOptimizer(0.0001),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.summary()
traning = model.fit(X_train, Y_train, epochs=100, batch_size=32)
model.save(
    '/home/pmcn/workspace/Test_Code/Resnet50/checkpoint/Para1Series_resnet101_model_1.h5'
)
preds = model.evaluate(X_test, Y_test)
print("Loss = " + str(preds[0]))
print("Test Accuracy = " + str(preds[1]))
Example No. 13
def get_base_model(input_shape=(768, 432, 6), compiled=True):

    ### ResNet Backbone ###

    inp = Input(shape=input_shape)
    img, bgr = Lambda(lambda x: tf.split(x, 2, axis=-1))(inp)
    #reduced_channels = Conv3D(filters=512, kernel_size=(3,3,2),padding='SAME')(inp)

    resnet = ResNet101(weights='imagenet', include_top=False, input_tensor=img)

    resnet.trainable = False
    backbone_in = resnet.input
    resblock1 = resnet.get_layer('conv1_relu').output
    resblock2 = resnet.get_layer('conv2_block3_out').output
    resblock3 = resnet.get_layer('conv3_block4_out').output
    backbone_out = resnet.output

    x = backbone_out
    #x = Add()([img_res,bgr_res])

    ### ASPP ###

    # conv block 1
    conv1 = Conv2D(ASPP_FILTERS,
                   1,
                   padding='SAME',
                   dilation_rate=1,
                   use_bias=False,
                   name='aspp_conv1_in')(x)
    conv1 = BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON)(conv1)
    conv1 = ReLU()(conv1)

    # conv block 2
    conv2 = Conv2D(ASPP_FILTERS,
                   3,
                   padding='SAME',
                   dilation_rate=ASPP_DILATIONS[0],
                   use_bias=False,
                   name='aspp_conv2_in')(x)
    conv2 = BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON)(conv2)
    conv2 = ReLU()(conv2)

    # conv block 3
    conv3 = Conv2D(ASPP_FILTERS,
                   3,
                   padding='SAME',
                   dilation_rate=ASPP_DILATIONS[1],
                   use_bias=False,
                   name='aspp_conv3_in')(x)
    conv3 = BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON)(conv3)
    conv3 = ReLU()(conv3)

    # conv block 4
    conv4 = Conv2D(ASPP_FILTERS,
                   3,
                   padding='SAME',
                   dilation_rate=ASPP_DILATIONS[2],
                   use_bias=False,
                   name='aspp_conv4_in')(x)
    conv4 = BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON)(conv4)
    conv4 = ReLU()(conv4)

    # pooling block
    dims = tf.shape(backbone_out)[1], tf.shape(x)[2]
    pool = GlobalAveragePooling2D(name='aspp_pool_in')(x)
    pool = pool[:, None, None, :]
    pool = Conv2D(ASPP_FILTERS, 1, use_bias=False)(pool)
    pool = BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON)(pool)
    pool = ReLU()(pool)
    pool = tf.image.resize(pool, dims, 'nearest')

    # pyramid construction
    pyr = tf.concat([conv1, conv2, conv3, conv4, pool], axis=-1)
    pyr = Conv2D(ASPP_FILTERS, 1, use_bias=False)(pyr)
    pyr = BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON)(pyr)
    pyr = ReLU()(pyr)
    pyr = Dropout(DROPOUT_RATE)(pyr)

    ### DECODER ###

    x4, x3, x2, x1, x0 = pyr, resblock3, resblock2, resblock1, backbone_in

    x = Lambda(lambda a: tf.image.resize(a, tf.shape(x3)[1:3]))(x4)
    x = tf.concat([x, x3], axis=-1)
    x = Conv2D(DECODER_CHANNELS[0], 3, padding='SAME', use_bias=False)(x)
    x = BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON)(x)
    x = ReLU()(x)

    x = Lambda(lambda a: tf.image.resize(a, tf.shape(x2)[1:3]))(x)
    x = tf.concat([x, x2], axis=-1)
    x = Conv2D(DECODER_CHANNELS[1], 3, padding='SAME', use_bias=False)(x)
    x = BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON)(x)
    x = ReLU()(x)

    x = Lambda(lambda a: tf.image.resize(a, tf.shape(x1)[1:3]))(x)
    x = tf.concat([x, x1], axis=-1)
    x = Conv2D(DECODER_CHANNELS[2], 3, padding='SAME', use_bias=False)(x)
    x = BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON)(x)
    x = ReLU()(x)

    x = Lambda(lambda a: tf.image.resize(a, tf.shape(x0)[1:3]))(x)
    x = tf.concat([x, x0], axis=-1)
    x = Conv2D(DECODER_CHANNELS[3], 3, padding='SAME', use_bias=True)(x)

    print(x.shape)

    out = x

    ### COMPILE ###

    model = Model(inputs=inp, outputs=out)

    if compiled:
        model.compile(optimizer='adam', loss=base_alpha_mse_loss)

    return model
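# Usage sketch: instantiate the matting backbone defined above. Passing
# compiled=False avoids the dependency on base_alpha_mse_loss, which is defined
# elsewhere in this project.
matting_model = get_base_model(input_shape=(768, 432, 6), compiled=False)
matting_model.summary()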
Example No. 14
def transfer_learning_model(train_x, train_y, val_x, val_y, test_x, test_y, num_class, epoch, batch_size, model_type, reshape_size, l1_weight, l2_weight):

    print(train_x.shape)
    print(type(train_x))
    print('\n')
    print(train_x[0].shape)

    # change label into one hot
    train_y = keras.utils.to_categorical(train_y, num_classes=num_class)
    val_y = keras.utils.to_categorical(val_y, num_classes=num_class)
    #test_y = keras.utils.to_categorical(test_y, num_classes=num_class)
    print(test_y)

    if model_type == 'vgg16':
        pre_trained = VGG16(
            weights='imagenet',
            include_top=False, # True keeps the final fully connected layers; False uses only the convolutional base
            input_shape=reshape_size + (3,)
        )
        pre_trained.trainable = False  # False freezes the pretrained weights

    elif model_type == 'vgg19':
        pre_trained = VGG19(
            weights='imagenet',
            include_top=False,
            input_shape=reshape_size+ (3,)
        )

    elif model_type == 'resnet101':
        pre_trained = ResNet101(
            weights='imagenet',
            include_top=False,
            input_shape=reshape_size + (3,)
        )

    elif model_type == 'resnet50':
        pre_trained = ResNet50(
            weights='imagenet',
            include_top=False,
            input_shape=reshape_size + (3,)
        )

    elif model_type == 'xception':
        pre_trained = Xception(
            weights='imagenet',
            include_top=False,
            input_shape=reshape_size + (3,)
        )

    elif model_type == 'inception_v3':
        pre_trained = InceptionV3(
            weights='imagenet',
            include_top=False,
            input_shape=reshape_size + (3,)
        )

    elif model_type == 'mobilenet':
        pre_trained = MobileNet(
            weights='imagenet',
            include_top=False,
            input_shape=reshape_size + (3,)
        )

    #pre_trained.summary()
    # Add Fine-Tuning Layers
    finetune_model = models.Sequential()
    finetune_model.add(pre_trained)
    
    if model_type == 'resnet50':
        pass

    else:
        finetune_model.add(layers.Flatten())

    finetune_model.add(layers.Dense(num_class*128,# activation='relu',
        kernel_regularizer=regularizers.l1_l2(
            l1=l1_weight,
            l2=l2_weight)
        ))
    finetune_model.add(BatchNormalization())
    finetune_model.add(Activation('relu'))
    #finetune_model.add(layers.Dense(num_class*64, activation='relu'))
    
    finetune_model.add(layers.Dense(num_class*32,# activation='relu',
        kernel_regularizer=regularizers.l1_l2(
                l1=l1_weight,
                l2=l2_weight)
        ))
    finetune_model.add(BatchNormalization())
    finetune_model.add(Activation('relu'))
    
    #finetune_model.add(layers.Dense(num_class*16, activation='relu'))
    
    finetune_model.add(layers.Dense(num_class*8,# activation='relu',
        kernel_regularizer=regularizers.l1_l2(
                    l1=l1_weight,
                    l2=l2_weight)    
        ))
    finetune_model.add(BatchNormalization())
    finetune_model.add(Activation('relu'))
    
    finetune_model.add(layers.Dense(num_class, activation='softmax')) # Final Activation

    #finetune_model.summary()

    # Compile
    finetune_model.compile(
        loss = 'categorical_crossentropy',
        optimizer = 'adam',
        metrics=['acc']
    )

    history = finetune_model.fit(
        train_x,
        train_y,
        epochs=epoch,
        batch_size = batch_size,
        validation_data = (val_x, val_y)
    )

    # Test Performance
    '''
    TODO: there is an issue with getting the Result ### !
    '''
    y_pred = finetune_model.predict(test_x) #np.argmax
    y_pred = np.argmax(y_pred, axis=1)
    print('>> Predicted Results')
    print(y_pred)
    
    #test_y = np.argmax(test_y, axis=1)
    print('>> Ground Truth')
    print(test_y)

    accuracy = accuracy_score(test_y, y_pred)
    precision, recall, f1_score, _ = precision_recall_fscore_support(test_y, y_pred, average='micro')
    
    print(">> Test Performance <<")
    print('Acc: ', accuracy)
    print('Precision: ', precision)
    print('Recall: ', recall)
    print('F1 Score: ', f1_score)
    
Example No. 15
# print('x_train :',x_train.shape)
# print('x_test :',x_test.shape)
print('y_train :', y_train.shape)
print('y_test :', y_test.shape)

#2. Data normalization
x_train = x_train / 255
x_test = x_test / 255

print('x_train :', x_train.shape)
print('x_test :', x_test.shape)

#3. Model construction
resnet101 = ResNet101(include_top=False,
                      weights='imagenet',
                      input_tensor=Input(shape=(32, 32, 3)))
# With include_top=False you can modify the model yourself and add layers

model = Sequential()
model.add(resnet101)
model.add(Flatten())
model.add(Dense(256, name='hidden1'))
model.add(Activation('relu'))
model.add(Dense(10, name='output', activation='softmax'))

model.summary()

model.compile(optimizer=Adam(1e-4),
              loss='sparse_categorical_crossentropy',
              metrics=['acc'])  #0.0001
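# Training sketch (epoch count and batch size are assumptions, not from the
# original snippet): labels stay as integer class ids, matching the
# sparse_categorical_crossentropy loss configured above.
hist = model.fit(x_train, y_train,
                 epochs=10,
                 batch_size=64,
                 validation_data=(x_test, y_test))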
Example No. 16
from keras.optimizers import Adam, SGD

from keras.datasets import cifar10
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline

(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print(x_train.shape)  # (50000, 32, 32, 3)
print(x_test.shape)  # (10000, 32, 32, 3)
print(y_train.shape)  # (50000, 1)
print(y_test.shape)  # (10000, 1)

ishape = (32, 32, 3)

resnet = ResNet101(include_top=False, weights='imagenet',
                   input_shape=ishape)  # ishape = (32, 32, 3)
# vgg16.summary()

act = 'relu'
model = Sequential()

model.add(resnet)
model.add(Flatten())
model.add(Dense(256))
# model.add(BatchNormalization())
model.add(Activation(act))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))

for layer in model.layers[:19]:
    layer.trainable = False
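# Compile/fit sketch (hyperparameters are assumptions): the compile and fit
# calls that would typically follow the layer-freezing loop above.
model.compile(optimizer=Adam(1e-4),
              loss='sparse_categorical_crossentropy',
              metrics=['acc'])
model.fit(x_train, y_train, epochs=5, batch_size=64,
          validation_data=(x_test, y_test))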
Example No. 17
def convolutional(instruction=None,
                  read_mode=None,
                  preprocess=True,
                  data_path=None,
                  verbose=0,
                  new_folders=True,
                  image_column=None,
                  training_ratio=0.8,
                  augmentation=True,
                  custom_arch=None,
                  pretrained=None,
                  epochs=10,
                  height=None,
                  width=None):
    '''
    Body of the convolutional function that is called in the neural network query
    when the data is presented as images.
    :param many parameters: used for preprocessing, tuning, plot generation, and parameterizing the convolutional neural network being trained.
    :return: dictionary that holds all the information for the finished model.
    '''

    # data_path = get_folder_dir()

    logger("Generating datasets for classes")

    if pretrained:
        if not height:
            height = 224
        if not width:
            width = 224
        if height != 224 or width != 224:
            raise ValueError(
                "For pretrained models, both 'height' and 'width' must be 224."
            )

    if preprocess:
        if custom_arch:
            raise ValueError(
                "If 'custom_arch' is not None, 'preprocess' must be set to false."
            )

        read_mode_info = set_distinguisher(data_path, read_mode)
        read_mode = read_mode_info["read_mode"]

        training_path = "/proc_training_set"
        testing_path = "/proc_testing_set"

        if read_mode == "setwise":
            processInfo = setwise_preprocessing(data_path, new_folders, height,
                                                width)
            if not new_folders:
                training_path = "/training_set"
                testing_path = "/testing_set"

        # if image dataset in form of csv
        elif read_mode == "csvwise":
            if training_ratio <= 0 or training_ratio >= 1:
                raise BaseException(f"Test ratio must be between 0 and 1.")
            processInfo = csv_preprocessing(read_mode_info["csv_path"],
                                            data_path, instruction,
                                            image_column, training_ratio,
                                            height, width)

        # if image dataset in form of one folder containing class folders
        elif read_mode == "classwise":
            if training_ratio <= 0 or training_ratio >= 1:
                raise BaseException(f"Test ratio must be between 0 and 1.")
            processInfo = classwise_preprocessing(data_path, training_ratio,
                                                  height, width)

    else:
        training_path = "/training_set"
        testing_path = "/testing_set"
        processInfo = already_processed(data_path)

    num_channels = 3
    color_mode = 'rgb'
    if processInfo["gray_scale"]:
        num_channels = 1
        color_mode = 'grayscale'

    input_shape = (processInfo["height"], processInfo["width"], num_channels)
    input_single = (processInfo["height"], processInfo["width"])
    num_classes = processInfo["num_categories"]
    loss_func = ""
    output_layer_activation = ""

    if num_classes > 2:
        loss_func = "categorical_crossentropy"
        output_layer_activation = "softmax"
    elif num_classes == 2:
        num_classes = 1
        loss_func = "binary_crossentropy"
        output_layer_activation = "sigmoid"

    logger("Creating convolutional neural netwwork dynamically")

    # Convolutional Neural Network

    # Build model based on custom_arch configuration if given
    if custom_arch:
        with open(custom_arch, "r") as f:
            custom_arch_dict = json.load(f)
            custom_arch_json_string = json.dumps(custom_arch_dict)
            model = model_from_json(custom_arch_json_string)

    # Build an existing state-of-the-art model
    elif pretrained:

        arch_lower = pretrained.get('arch').lower()

        # If user specifies value of pretrained['weights'] as 'imagenet', weights pretrained on ImageNet will be used
        if 'weights' in pretrained and pretrained.get('weights') == 'imagenet':
            # Load ImageNet pretrained weights
            if arch_lower == "vggnet16":
                base_model = VGG16(include_top=False,
                                   weights='imagenet',
                                   input_shape=input_shape)
                x = Flatten()(base_model.output)
                x = Dense(4096)(x)
                x = Dropout(0.5)(x)
                x = Dense(4096)(x)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "vggnet19":
                base_model = VGG19(include_top=False,
                                   weights='imagenet',
                                   input_shape=input_shape)
                x = Flatten()(base_model.output)
                x = Dense(4096)(x)
                x = Dropout(0.5)(x)
                x = Dense(4096)(x)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "resnet50":
                base_model = ResNet50(include_top=False,
                                      weights='imagenet',
                                      input_shape=input_shape)
                x = GlobalAveragePooling2D()(base_model.output)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "resnet101":
                base_model = ResNet101(include_top=False,
                                       weights='imagenet',
                                       input_shape=input_shape)
                x = GlobalAveragePooling2D()(base_model.output)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "resnet152":
                base_model = ResNet152(include_top=False,
                                       weights='imagenet',
                                       input_shape=input_shape)
                x = GlobalAveragePooling2D()(base_model.output)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            else:
                raise ModuleNotFoundError("arch \'" + pretrained.get('arch') +
                                          "\' not supported.")

        else:
            # Randomly initialized weights
            if arch_lower == "vggnet16":
                model = VGG16(include_top=True,
                              weights=None,
                              classes=num_classes,
                              classifier_activation=output_layer_activation)
            elif arch_lower == "vggnet19":
                model = VGG19(include_top=True,
                              weights=None,
                              classes=num_classes,
                              classifier_activation=output_layer_activation)
            elif arch_lower == "resnet50":
                model = ResNet50(include_top=True,
                                 weights=None,
                                 classes=num_classes)
            elif arch_lower == "resnet101":
                model = ResNet101(include_top=True,
                                  weights=None,
                                  classes=num_classes)
            elif arch_lower == "resnet152":
                model = ResNet152(include_top=True,
                                  weights=None,
                                  classes=num_classes)
            else:
                raise ModuleNotFoundError("arch \'" + pretrained.get('arch') +
                                          "\' not supported.")
    else:
        model = Sequential()
        # model.add(
        #     Conv2D(
        #         64,
        #         kernel_size=3,
        #         activation="relu",
        #         input_shape=input_shape))
        # model.add(MaxPooling2D(pool_size=(2, 2)))
        # model.add(Conv2D(64, kernel_size=3, activation="relu"))
        # model.add(MaxPooling2D(pool_size=(2, 2)))
        # model.add(Flatten())
        # model.add(Dense(num_classes, activation="softmax"))
        # model.compile(
        #     optimizer="adam",
        #     loss=loss_func,
        #     metrics=['accuracy'])
        model.add(
            Conv2D(filters=64,
                   kernel_size=5,
                   activation="relu",
                   input_shape=input_shape))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Conv2D(filters=64, kernel_size=3, activation="relu"))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        model.add(Conv2D(filters=64, kernel_size=3, activation="relu"))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dense(units=256, activation="relu"))
        model.add(Dropout(0.25))
        model.add(Dense(units=num_classes, activation="softmax"))

    model.compile(optimizer="adam", loss=loss_func, metrics=['accuracy'])

    logger("Located image data")

    if augmentation:
        train_data = ImageDataGenerator(rescale=1. / 255,
                                        shear_range=0.2,
                                        zoom_range=0.2,
                                        horizontal_flip=True)
        test_data = ImageDataGenerator(rescale=1. / 255)

        logger('Dataset augmented through zoom, shear, flip, and rescale')
    else:
        train_data = ImageDataGenerator()
        test_data = ImageDataGenerator()

    logger("->", "Optimal image size identified: {}".format(input_shape))
    X_train = train_data.flow_from_directory(
        data_path + training_path,
        target_size=input_single,
        color_mode=color_mode,
        batch_size=(16 if processInfo["train_size"] >= 16 else 1),
        class_mode=loss_func[:loss_func.find("_")])
    X_test = test_data.flow_from_directory(
        data_path + testing_path,
        target_size=input_single,
        color_mode=color_mode,
        batch_size=(16 if processInfo["test_size"] >= 16 else 1),
        class_mode=loss_func[:loss_func.find("_")])

    if epochs <= 0:
        raise BaseException("Number of epochs has to be greater than 0.")
    logger('Training image model')
    history = model.fit_generator(
        X_train,
        steps_per_epoch=X_train.n // X_train.batch_size,
        validation_data=X_test,
        validation_steps=X_test.n // X_test.batch_size,
        epochs=epochs,
        verbose=verbose)

    logger(
        '->', 'Final training accuracy: {}'.format(
            history.history['accuracy'][len(history.history['accuracy']) - 1]))
    logger(
        '->',
        'Final validation accuracy: {}'.format(history.history['val_accuracy'][
            len(history.history['val_accuracy']) - 1]))
    # storing values the model dictionary

    logger("Stored model under 'convolutional_NN' key")
    clearLog()
    return {
        'id': generate_id(),
        'data_type': read_mode,
        'data_path': data_path,
        'data': {
            'train': X_train,
            'test': X_test
        },
        'shape': input_shape,
        "model": model,
        'losses': {
            'training_loss': history.history['loss'],
            'val_loss': history.history['val_loss']
        },
        'accuracy': {
            'training_accuracy': history.history['accuracy'],
            'validation_accuracy': history.history['val_accuracy']
        },
        'num_classes': (2 if num_classes == 1 else num_classes),
        'data_sizes': {
            'train_size': processInfo['train_size'],
            'test_size': processInfo['test_size']
        }
    }
Example No. 18
from keras.datasets import cifar100, cifar10

from keras.applications import VGG16, VGG19, Xception, ResNet101
from keras.models import Sequential, Model, Input
from keras.layers import Dense, Conv2D, Flatten, BatchNormalization, Activation, MaxPooling2D, Dropout
from keras.optimizers import Adam

import dalex as dx
expl = dx.Explainer(clf, X, y, label="Titanic MLP Pipeline")

(x_train, y_train), (x_test, y_test) = cifar10.load_data()

x_train = x_train.reshape(50000, 32, 32, 3).astype('float32') / 255.0
x_test = x_test.reshape(10000, 32, 32, 3).astype('float32') / 255.0

resnet101 = ResNet101(input_shape=(32, 32, 3), include_top=False)

model = Sequential()
model.add(resnet101)
model.add(Flatten())
model.add(Dense(512))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(10, activation='softmax'))

model.summary()

model.compile(optimizer=Adam(2e-4),
              loss='sparse_categorical_crossentropy',
              metrics=['acc'])
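# Training/evaluation sketch (epoch count and batch size are assumptions): fit
# on the normalized CIFAR-10 arrays prepared above and report the test accuracy.
model.fit(x_train, y_train, epochs=10, batch_size=64,
          validation_data=(x_test, y_test))
loss, acc = model.evaluate(x_test, y_test)
print('test loss:', loss, 'test acc:', acc)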