Example #1
def create_model():
    # note: `classes` is ignored when include_top=False; the Dense head
    # below defines the actual outputs
    base_model = ResNet152V2(include_top=False,
                             pooling="max",
                             input_shape=input_shape,
                             classes=15)
    base_model.summary()
    model = Sequential()
    model.add(base_model)
    model.add(Dense(580))
    model.add(Dropout(0.5))
    model.add(Dense(len(finding_labels), activation='sigmoid'))

    METRICS = [
        # keras.metrics.TruePositives(name='tp'),
        # keras.metrics.FalsePositives(name='fp'),
        # keras.metrics.TrueNegatives(name='tn'),
        # keras.metrics.FalseNegatives(name='fn'),
        keras.metrics.Precision(name='precision'),
        keras.metrics.Recall(name='recall'),
        keras.metrics.AUC(name='AUC'),
    ]

    model.compile(loss=keras.losses.binary_crossentropy,
                  optimizer=SGD(learning_rate=0.01, momentum=0.01),  # `lr` is the deprecated alias
                  metrics=METRICS)
    return model
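
A minimal call sketch for create_model(); `input_shape` and `finding_labels` are globals the snippet relies on, so the values below are illustrative assumptions:

input_shape = (224, 224, 3)                             # hypothetical
finding_labels = ['finding_%d' % i for i in range(15)]  # hypothetical
model = create_model()  # sigmoid head + binary cross-entropy: a multi-label setup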
def get_model(model_name):

    if model_name == 'VGG16':

        from keras.applications import VGG16
        model = VGG16(weights="imagenet", include_top=False, pooling='avg')
        size = 512  # with pooling='avg'; 512 * 7 * 7 with pooling=None

    elif model_name == 'ResNet50':

        from keras.applications import ResNet50
        model = ResNet50(weights="imagenet", include_top=False, pooling='avg')
        size = 2048  # with pooling='avg'; 2048 * 7 * 7 with pooling=None

    elif model_name == 'ResNet152':

        from keras.applications import ResNet152
        model = ResNet152(weights="imagenet", include_top=False, pooling='avg')
        size = 2048  # with pooling='avg'; 2048 * 7 * 7 with pooling=None

    elif model_name == 'ResNet152V2':

        from keras.applications import ResNet152V2
        model = ResNet152V2(weights="imagenet",
                            include_top=False,
                            pooling='avg')
        size = 2048  # with pooling='avg'; 2048 * 7 * 7 with pooling=None

    elif model_name == 'DenseNet121':

        from keras.applications import DenseNet121
        model = DenseNet121(weights="imagenet",
                            include_top=False,
                            pooling='avg')
        size = 1024  # with pooling='avg'

    elif model_name == 'Custom':

        ## CUSTOM MODEL

        from keras.models import load_model
        model = load_model(config.FINE_TUNED_MODEL)
        size = 2048  # our trained models are based on ResNet152

    else:

        raise ValueError(
            "Model needs to be defined. Examples: VGG16 or ResNet50.")

    return model, size
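
A usage sketch for get_model; the returned `size` is the width of the pooled feature vector, so it can size whatever head is attached (the prediction call is illustrative):

model, size = get_model('ResNet152V2')  # size == 2048 with pooling='avg'
features = model.predict(image_batch)   # `image_batch` is a hypothetical (N, H, W, 3) array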
Example #3
 def train(self):
     # re-size all the images to this
     IMAGE_SIZE = [224, 224]
     # add preprocessing layer to the front of VGG
     resnet = ResNet152V2(input_shape=IMAGE_SIZE + [3],
                          weights='imagenet',
                          include_top=False)
     # don't train existing weights
     for layer in resnet.layers:
         layer.trainable = False
     # useful for getting number of classes
     folders = glob(self.train_path + '*')
     # our layers - you can add more if you want
     x = Flatten()(resnet.output)
     # softmax matches the one-hot labels produced by class_mode='categorical'
     # (the original used sigmoid, which is meant for multi-label outputs)
     prediction = Dense(len(folders), activation='softmax')(x)
     # create a model object
     model = Model(inputs=resnet.input, outputs=prediction)
     # view the structure of the model
     model.summary()
     # tell the model what cost and optimization method to use
     model.compile(loss='categorical_crossentropy',
                   optimizer='adam',
                   metrics=['accuracy'])
     # Use the Image Data Generator to import the images from the dataset
     train_datagen = ImageDataGenerator(rescale=1. / 255,
                                        shear_range=0.2,
                                        zoom_range=0.2,
                                        horizontal_flip=True)
     test_datagen = ImageDataGenerator(rescale=1. / 255)
     training_set = train_datagen.flow_from_directory(
         self.train_path,
         target_size=(224, 224),
         batch_size=32,
         class_mode='categorical')
     # note: this points at the training directory; a separate held-out
     # directory is presumably intended for real evaluation
     test_set = test_datagen.flow_from_directory(self.train_path,
                                                 target_size=(224, 224),
                                                 batch_size=32,
                                                 class_mode='categorical')
     # fit the model (steps_per_epoch=2 draws only 2 batches per epoch;
     # len(training_set) would cover the full dataset)
     r = model.fit_generator(training_set,
                             validation_data=test_set,
                             epochs=10,
                             steps_per_epoch=2,
                             validation_steps=len(test_set))
     model.save(self.model_save_path)
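
Once train() has run, the saved network can be reloaded for inference; a sketch, assuming `model_save_path` is the same path used above:

from keras.models import load_model
trained = load_model(model_save_path)
preds = trained.predict(image_batch)  # `image_batch` is hypothetical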
def get_model():

    conv_base = ResNet152V2(weights='imagenet',
                            include_top=False,
                            input_shape=(width, height, 3))
    conv_base.trainable = True

    x = conv_base.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.4)(x)
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.4)(x)
    x = BatchNormalization()(x)
    preds = Dense(1, activation='sigmoid')(x)

    model = Model(inputs=conv_base.input, outputs=preds)

    model.compile(optimizer=optimizers.Adam(learning_rate=2e-5),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
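
Note the pairing in this variant: conv_base.trainable = True fine-tunes the whole backbone, which is why the Adam learning rate is kept very small (2e-5); a larger rate would quickly distort the pretrained ImageNet weights.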
Example #5
    def load(self,
             model_path,
             number_classes=3,
             resnet_type=ModelType.RESNET50):

        if not os.path.isfile(model_path):
            raise Exception(
                "model not found at path '{0}'".format(model_path))

        base_model = None

        if resnet_type == ModelType.RESNET150:
            base_model = ResNet152V2(weights="imagenet", include_top=False)
        elif resnet_type == ModelType.RESNET50:
            base_model = ResNet50(weights="imagenet", include_top=False)
        else:
            # without this guard base_model stays None and the lines below fail
            raise ValueError(
                "unsupported resnet_type: {0}".format(resnet_type))

        x = base_model.output
        x = GlobalAveragePooling2D()(x)
        x = Dense(1000, activation="relu")(x)
        x = Dropout(0.5)(x)
        pred = Dense(number_classes, activation="softmax")(x)
        model = Model(inputs=base_model.input, outputs=pred)
        model.load_weights(model_path)
        return model
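
load() rebuilds the head (Dense(1000) → Dropout → softmax) before calling load_weights, so the checkpoint at model_path must have been saved from this exact architecture or the weight shapes will not match. A call sketch with a hypothetical path:

obj = ...  # instance of the enclosing class (not shown in the snippet)
model = obj.load('checkpoints/resnet50_3cls.h5',  # hypothetical path
                 number_classes=3,
                 resnet_type=ModelType.RESNET50)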
Example #6
def Resnet_Net(trainable=None, net="ResNet50"):

    netold = ['ResNet50', 'ResNet101', 'ResNet152']
    # Build the Keras data generators
    if net not in netold:
        train_datagen = ImageDataGenerator(rotation_range=rotation,
                                           width_shift_range=width_shift,
                                           height_shift_range=height_shift,
                                           rescale=scale,
                                           shear_range=shear,
                                           zoom_range=zoom,
                                           horizontal_flip=horizontal,
                                           fill_mode=fill,
                                           validation_split=validation)
        test_datagen = ImageDataGenerator(rescale=scale)
    else:
        train_datagen = ImageDataGenerator(
            dtype='float32',
            preprocessing_function=preprocess_input,
            validation_split=validation)
        test_datagen = ImageDataGenerator(
            dtype='float32', preprocessing_function=preprocess_input)

    train_generator = train_datagen.flow_from_directory(
        path,
        target_size=target,
        batch_size=batch,
        class_mode='categorical',
        subset='training',
    )
    validation_generator = train_datagen.flow_from_directory(
        path,
        target_size=target,
        batch_size=batch,
        class_mode='categorical',
        subset='validation')

    models_list = [
        'ResNet50', 'ResNet101', 'ResNet152', 'ResNet50V2', 'ResNet101V2',
        'ResNet152V2'
    ]

    # Load the selected ResNet variant

    if net == "ResNet50":
        resnet = ResNet50(include_top=False,
                          weights='imagenet',
                          input_shape=input_sh,
                          pooling=pooling_model)
    if net == "ResNet101":
        resnet = ResNet101(include_top=False,
                           weights='imagenet',
                           input_shape=input_sh,
                           pooling=pooling_model)
    if net == "ResNet152":
        resnet = ResNet152(include_top=False,
                           weights='imagenet',
                           input_shape=input_sh,
                           pooling=pooling_model)
    if net == "ResNet50V2":
        resnet = ResNet50V2(include_top=False,
                            weights='imagenet',
                            input_shape=input_sh,
                            pooling=pooling_model)
    if net == "ResNet101V2":
        resnet = ResNet101V2(include_top=False,
                             weights='imagenet',
                             input_shape=input_sh,
                             pooling=pooling_model)
    if net == "ResNet152V2":
        resnet = ResNet152V2(include_top=False,
                             weights='imagenet',
                             input_shape=input_sh,
                             pooling=pooling_model)
    if net not in models_list:
        raise ValueError('Please provide a valid model name: one of ' +
                         ', '.join(models_list))
    output = resnet.layers[-1].output
    if pooling_model is None:
        output = keras.layers.Flatten()(output)
    resnet = Model(resnet.input, outputs=output)
    resnet.summary()
    print('\n\n\n')
    # If fine-tuning is not requested
    if trainable is None:
        model = Sequential()
        model.add(resnet)
        model.add(Dense(hidden, activation='relu'))  # input size is inferred from the resnet base
        model.add(Dropout(dropout_num))
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num))
        if classes == 1:
            model.add(Dense(classes, activation='sigmoid', name='Output'))
        else:
            model.add(Dense(classes, activation='softmax', name='Output'))

        for layer in resnet.layers:
            layer.trainable = False
        print("The model summary of Resnet  -->\n\n\n"
              )  # In this the Resnet50 layers are not trainable

        for i, layer in enumerate(resnet.layers):
            print(i, layer.name, layer.trainable)
        model.compile(
            loss=loss_param,  # Change according to data
            optimizer=optimizers.RMSprop(),
            metrics=['accuracy'])
        print("The summary of final Model \n\n\n")
        model.summary()
        print('\n\n\n')

        fit_history = model.fit_generator(
            train_generator,
            steps_per_epoch=len(train_generator.filenames) // batch,
            epochs=epoch,
            shuffle=True,
            validation_data=validation_generator,
            validation_steps=len(validation_generator.filenames) // batch,
            class_weight=n,
            callbacks=[
                EarlyStopping(patience=patience_param,
                              restore_best_weights=True),
                ReduceLROnPlateau(patience=patience_param)
            ])
        os.chdir(output_path)
        model.save("model.h5")
        print(fit_history.history.keys())
        plt.figure(1, figsize=(15, 8))

        plt.subplot(221)
        plt.plot(fit_history.history['accuracy'])
        plt.plot(fit_history.history['val_accuracy'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])

        plt.subplot(222)
        plt.plot(fit_history.history['loss'])
        plt.plot(fit_history.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])

        plt.show()

    if trainable is not None:
        # Make last block of the conv_base trainable:

        for layer in resnet.layers[:trainable]:
            layer.trainable = False
        for layer in resnet.layers[trainable:]:
            layer.trainable = True

        print('Last block of the conv_base is now trainable')

        for i, layer in enumerate(resnet.layers):
            print(i, layer.name, layer.trainable)

        model = Sequential()
        model.add(resnet)
        model.add(Dense(hidden, activation='relu'))  # input size is inferred from the resnet base
        model.add(Dropout(dropout_num))
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num))
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num))
        if classes == 1:
            model.add(Dense(classes, activation='sigmoid', name='Output'))
        else:
            model.add(Dense(classes, activation='softmax', name='Output'))

        # note: the original re-froze every layer here, undoing the partial
        # unfreezing above; the fine-tuning setup is kept instead
        print("The model summary of Resnet -->\n\n\n")
        model.compile(
            loss=loss_param,  # Change according to data
            optimizer=optimizers.RMSprop(),
            metrics=['accuracy'])
        print("The summary of final Model \n\n\n")
        model.summary()
        print('\n\n\n')

        fit_history = model.fit_generator(
            train_generator,
            steps_per_epoch=len(train_generator.filenames) // batch,
            epochs=epoch,
            shuffle=True,
            validation_data=validation_generator,
            validation_steps=len(validation_generator.filenames) // batch,
            class_weight=n,
            callbacks=[
                EarlyStopping(patience=patience_param,
                              restore_best_weights=True),
                ReduceLROnPlateau(patience=patience_param)
            ])
        os.chdir(output_path)
        model.save("model.h5")
        print(fit_history.history.keys())
        plt.figure(1, figsize=(15, 8))

        plt.subplot(221)
        plt.plot(fit_history.history['accuracy'])
        plt.plot(fit_history.history['val_accuracy'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])

        plt.subplot(222)
        plt.plot(fit_history.history['loss'])
        plt.plot(fit_history.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])

        plt.show()
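
A call sketch for Resnet_Net; it leans on module-level globals (path, batch, target, classes, hidden, epoch, ...) that must already be set, and the argument values below are illustrative:

Resnet_Net(trainable=None, net='ResNet152V2')  # feature extraction, backbone frozen
Resnet_Net(trainable=-30, net='ResNet152V2')   # fine-tune the last 30 layers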
def design_model(train, test, ytrain, ytest, epoch, nclas, value, val):
    # initialize timers for training and testing
    tr = 0.0
    tt = 0.0

    #start time
    st = time.time()

    #input shape
    image_input = Input(shape=(None, None, 3))
    # a Lambda layer resizes inputs to 224x224 before they enter the model
    prep = Lambda(lambda x: tf.image.resize(x, (224, 224)))(image_input)
    # transfer learning: InceptionResNetV2 with pre-trained ImageNet weights
    dmodel = InceptionResNetV2(include_top=False,
                               weights='imagenet',
                               input_shape=(224, 224, 3))(prep)
    dx = GlobalAveragePooling2D()(dmodel)
    i_model = Model(image_input, dx)
    i_model.summary()

    # extract deep features from InceptionResNetV2
    #get training deep features
    df_train_i = i_model.predict(train,
                                 batch_size=32,
                                 workers=50,
                                 use_multiprocessing=True,
                                 verbose=1)

    #get testing deep features
    df_test_i = i_model.predict(test,
                                batch_size=32,
                                workers=50,
                                use_multiprocessing=True,
                                verbose=1)

    # transfer learning: ResNet152V2 with pre-trained ImageNet weights
    vmodel = ResNet152V2(include_top=False,
                         weights='imagenet',
                         input_shape=(224, 224, 3))(prep)
    vx = GlobalAveragePooling2D()(vmodel)
    r_model = Model(image_input, vx)
    r_model.summary()

    # extract deep features from ResNet152V2
    #get training features
    df_train_r = r_model.predict(train,
                                 batch_size=32,
                                 workers=50,
                                 use_multiprocessing=True,
                                 verbose=1)

    #get testing features
    df_test_r = r_model.predict(test,
                                batch_size=32,
                                workers=50,
                                use_multiprocessing=True,
                                verbose=1)

    # feature fusion: concatenate the deep features from both backbones
    # into the final training and test feature sets
    final_train = tf.keras.layers.Concatenate()([df_train_i, df_train_r])
    final_test = tf.keras.layers.Concatenate()([df_test_i, df_test_r])

    # custom fully connected classifier (ANN)
    classifier = Sequential([
        Dense(4096, activation='relu', input_shape=final_train[0].shape),
        Dropout(0.5),
        Dense(nclas, activation='softmax')
    ])

    # optimizer (`decay` is the legacy Keras 2 argument; newer Keras versions
    # use a learning-rate schedule instead)
    opt = Adam(learning_rate=1e-4, decay=1e-4 / 50)

    #compile my own classifier
    classifier.compile(loss="categorical_crossentropy",
                       optimizer=opt,
                       metrics=["accuracy"])

    # reduce the learning rate when the training loss plateaus
    # (note: min_lr=0.001 is above the initial rate of 1e-4, so as written
    # this callback can never actually lower the rate)
    reduce_lr = ReduceLROnPlateau(monitor='loss',
                                  factor=0.2,
                                  patience=5,
                                  min_lr=0.001)
    # early stopping halts training when the loss stops improving
    es = EarlyStopping(monitor='loss', patience=10)
    #fit the model
    history = classifier.fit(final_train,
                             ytrain,
                             epochs=epoch,
                             batch_size=32,
                             shuffle=True,
                             verbose=1,
                             workers=50,
                             use_multiprocessing=True,
                             callbacks=[reduce_lr, es])

    #total time for training
    tr = time.time() - st
    #plot the graph of accuracy and loss for training
    plotgraph(history, value, val)
    #start time for testing
    st = time.time()
    # evaluate the model
    (loss, accuracy) = classifier.evaluate(final_test,
                                           ytest,
                                           batch_size=32,
                                           verbose=1,
                                           workers=50,
                                           use_multiprocessing=True)

    #total time for testing
    tt = time.time() - st

    #training and testing accuracy
    train_acc = history.history["accuracy"][-1:][0]
    test_acc = accuracy

    #free memory
    del i_model, df_train_i, df_test_i, r_model, df_train_r, df_test_r, final_train, final_test, classifier, history

    return tr, tt, train_acc, test_acc
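
A sizing note on the fusion above: with 224×224 inputs, global average pooling yields 1536-dimensional features from InceptionResNetV2 and 2048-dimensional features from ResNet152V2, so the fused vectors fed to the classifier are 3584-dimensional.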
batch_size = 32
training_set = aug.flow_from_directory(train_dir,
                                       target_size=(224, 224),
                                       batch_size=batch_size,
                                       class_mode="categorical",
                                       subset="training")
test_set = aug.flow_from_directory(train_dir,
                                   target_size=(224, 224),
                                   batch_size=batch_size,
                                   class_mode="categorical",
                                   subset="validation")

# define architecture
baseModel = ResNet152V2(weights="imagenet",
                        include_top=False,
                        input_shape=(224, 224, 3))
headModel = baseModel.output
headModel = GlobalAveragePooling2D()(headModel)
headModel = Dropout(0.25)(headModel)
headModel = Dense(39, activation='sigmoid',
                  name="resnet152v2_dense")(headModel)

model = Model(inputs=baseModel.input, outputs=headModel, name="ResNet152V2")

model.trainable = True
model.summary()


# define the stopping criterion: halt training once validation accuracy reaches 98%
class myCallback(tf.keras.callbacks.Callback):
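    # the original snippet is truncated here; a minimal body matching the
    # 98% criterion described above (assumed implementation):
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        if logs.get('val_accuracy', 0.0) >= 0.98:
            print('\nReached 98% validation accuracy, stopping training.')
            self.model.stop_training = True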
Example #9
from keras.applications import VGG16, VGG19, Xception, ResNet101, ResNet101V2, ResNet152, ResNet152V2
from keras.applications import ResNet50, ResNet50V2, InceptionV3, InceptionResNetV2
from keras.applications import MobileNet, MobileNetV2, DenseNet121, DenseNet169, DenseNet201
from keras.applications import NASNetLarge, NASNetMobile
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D, Flatten, BatchNormalization, Activation
from keras.optimizers import Adam

# vgg16 = VGG16() # (None, 224, 224, 3)
# model = VGG19()
# each constructor below builds that architecture (and downloads its ImageNet
# weights); `model` is simply overwritten each time, so only the last survives
model = Xception()
model = ResNet101()
model = ResNet101V2()
model = ResNet152()
model = ResNet152V2()
model = ResNet50()
model = ResNet50V2()
model = InceptionV3()
model = InceptionResNetV2()
model = MobileNet()
model = MobileNetV2()
model = DenseNet121()
model = DenseNet169()
model = DenseNet201()
model = NASNetLarge()
model = NASNetMobile()

# vgg16.summary()
'''
model= Sequential()
# model.add(vgg16)
Example #10
    random_img = random.choice(os.listdir(path))
    img_path = os.path.join(path, random_img)
    img = image.load_img(img_path, target_size=(img_width, img_height))
    img_tensor = image.img_to_array(
        img)  # Image data encoded as integers in the 0–255 range
    img_tensor /= 255.  # Normalize to [0,1] for plt.imshow application
    plt.imshow(img_tensor)
    plt.show()


# Instantiate convolutional base
from keras.applications import ResNet152V2

conv_base = ResNet152V2(
    weights='imagenet',
    include_top=False,
    input_shape=(img_width, img_height,
                 3))  # 3 = number of channels in RGB pictures
# Freeze everything first
for layer in conv_base.layers:
    layer.trainable = False

# Fine-tuning: unfreeze from a chosen block onwards
# (note: 'block5_conv1'/'block4_conv1' are VGG-style layer names and will not
# match any ResNet152V2 layer; substitute names from conv_base.summary())
set_trainable = False
for layer in conv_base.layers:
    if layer.name in ['block5_conv1', 'block4_conv1']:
        set_trainable = True
    layer.trainable = set_trainable
conv_base.summary()

#pass our images through it for feature extraction
# Extract features
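
The excerpt stops before the announced extraction step; a minimal sketch of pushing images through the frozen base (`image_batch` is a hypothetical array of shape (N, img_width, img_height, 3)):

features = conv_base.predict(image_batch)   # (N, h, w, 2048) feature maps
flat = features.reshape(len(features), -1)  # flatten for a downstream classifier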
Example #11
xy = (X_train, X_test, y_train, y_test)


# normalization
X_train = X_train.astype(float) / 255
X_test = X_test.astype(float) / 255

input_tensor = Input(shape=(400, 300, 3), dtype='float32', name='input')

pre_trained_res = ResNet152V2(weights='imagenet', include_top=False, input_shape=(400, 300, 3))
pre_trained_res.trainable = False
pre_trained_res.summary()

additional_model = models.Sequential()
additional_model.add(pre_trained_res)
additional_model.add(layers.Flatten())
additional_model.add(layers.Dense(4096, activation='relu'))
additional_model.add(layers.Dense(2048, activation='relu'))
additional_model.add(layers.Dense(1024, activation='relu'))
additional_model.add(layers.Dense(3, activation='softmax'))

# categorical_crossentropy matches the 3-way softmax head
# (binary_crossentropy, as in the original, is meant for sigmoid outputs)
additional_model.compile(loss='categorical_crossentropy',
                         optimizer=optimizers.RMSprop(learning_rate=1e-4),
                         metrics=['acc'])

# fit only after the model has been defined and compiled
# (the original called fit before `additional_model` existed)
history = additional_model.fit(X_train, y_train,
                               batch_size=1,
                               epochs=1,
                               validation_data=(X_test, y_test))
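
A sizing note on this head: with 400×300 inputs, ResNet152V2's final feature map is roughly 13×10×2048, so Flatten() emits about 266k values and the first Dense(4096) alone holds on the order of a billion weights; GlobalAveragePooling2D, as used in the other examples here, is the usual lighter alternative.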
        batch_size=256)  # we can use much larger batches for evaluation

    # used a fixed dataset for evaluating the algorithm
    test_X, test_Y = next(
        flow_from_dataframe(img_gen,
                            valid_df,
                            location_col='location',
                            y_col=disease,
                            target_size=IMG_SIZE,
                            color_mode='rgb',
                            batch_size=256))  # one big batch

    t_x, t_y = next(train_gen)

    base_model = ResNet152V2(input_shape=t_x.shape[1:],
                             include_top=False,
                             weights='imagenet')
    base_model.trainable = False
    # (Mader, 2018)
    pt_features = Input(base_model.get_output_shape_at(0)[1:],
                        name='feature_input')
    pt_depth = base_model.get_output_shape_at(0)[-1]
    bn_features = BatchNormalization(name='Features_BN')(pt_features)
    # (Mader, 2018)
    attn_layer = Conv2D(180,
                        kernel_size=(1, 1),
                        padding='same',
                        activation='elu')(bn_features)
    attn_layer = Conv2D(64,
                        kernel_size=(1, 1),
                        padding='same',