Example No. 1
    def build_base_model(self, inputs, blocks=None):
        inputs = keras.layers.Lambda(
            lambda x: keras_densenet.preprocess_input(x))(inputs)

        if self.backbone_name == 'densenet121':
            densenet = keras_densenet.DenseNet121(include_top=False,
                                                  weights='imagenet',
                                                  input_tensor=inputs)
        elif self.backbone_name == 'densenet169':
            densenet = keras_densenet.DenseNet169(include_top=False,
                                                  input_tensor=inputs,
                                                  weights='imagenet')
        elif self.backbone_name == 'densenet201':
            densenet = keras_densenet.DenseNet201(include_top=False,
                                                  input_tensor=inputs,
                                                  weights='imagenet')
        elif self.backbone_name == 'densenet':
            if blocks is None:
                raise ValueError(
                    'blocks must be specified to use custom densenet backbone')

            densenet = keras_densenet.DenseNet(blocks=blocks,
                                               include_top=False,
                                               input_tensor=inputs,
                                               weights='imagenet')
        else:
            raise ValueError("Backbone '{}' not recognized.".format(
                self.backbone_name))

        return densenet
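A usage sketch (hedged): it assumes this method belongs to a backbone-factory class, here instantiated as the hypothetical `builder`, with `backbone_name` already set, and that `keras` and `keras.applications.densenet as keras_densenet` are imported at module level.

import keras

inputs = keras.layers.Input(shape=(224, 224, 3))
builder.backbone_name = 'densenet121'          # builder: hypothetical instance exposing build_base_model
backbone = builder.build_base_model(inputs)
features = backbone.output                     # 7x7x1024 feature map for a 224x224 RGB input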
def create_model(image_size):
    densenet_conv = densenet.DenseNet201(weights='imagenet',
                                         include_top=True,
                                         input_shape=(image_size, image_size,
                                                      3))
    model = models.Sequential()
    model.add(densenet_conv)
    model.summary()
    return model
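Note that with include_top=True and ImageNet weights, Keras only accepts an input shape of exactly (224, 224, 3), so this helper effectively requires image_size=224; a minimal call:

model = create_model(224)   # any other image_size is rejected by the ImageNet-weighted classification top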
def createImageModel(height, width, depth):
    inputShape = (height, width, depth)
    # define the model input
    inputs = Input(shape=inputShape)
    # at least 32x32
    return densenet.DenseNet201(include_top=True,
                                weights=None,
                                input_tensor=inputs,
                                input_shape=inputShape,
                                pooling=None,
                                classes=9)
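A minimal usage sketch; the imports are assumptions inferred from the names used in the function:

from keras.layers import Input
from keras.applications import densenet

model = createImageModel(224, 224, 3)   # 9-class DenseNet201 trained from scratch (weights=None)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])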
Example No. 4
 def denseNet201_classificator(self, image_path):
     denseNet201_model = densenet.DenseNet201(weights='imagenet')
     filename = image_path
     original = load_img(filename, target_size=(WIDTH, HEIGHT))
     plt.imshow(original)
     numpy_image = img_to_array(original)
     plt.imshow(np.uint8(numpy_image))
     image_batch = np.expand_dims(numpy_image, axis=0)
     plt.imshow(np.uint8(image_batch[0]))
     processed_image = densenet.preprocess_input(image_batch.copy())
     predictions = denseNet201_model.predict(processed_image)
     label = decode_predictions(predictions)
     return sorted(label[0], key=lambda x: x[2], reverse=True)
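This method relies on module-level imports and constants that are not shown; a plausible setup, inferred from the names used (224x224 matches the input size DenseNet201 expects when its ImageNet classifier is kept):

import numpy as np
import matplotlib.pyplot as plt
from keras.applications import densenet
from keras.applications.imagenet_utils import decode_predictions
from keras.preprocessing.image import load_img, img_to_array

WIDTH, HEIGHT = 224, 224   # DenseNet201 with include_top=True expects 224x224 inputs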
Example No. 5
def get_model(input_shape, output_shape, readonly_until):
    """Get the model."""
    # load model
    model = densenet.DenseNet201(
        weights='imagenet',
        include_top=False,
        input_shape=input_shape
    )
    # make them readonly
    model = _set_readonly(model, readonly_until)
    # add new classification layers
    model = _classification(model, output_shape)

    return model
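The helpers `_set_readonly` and `_classification` are not part of this example; a minimal sketch of what they might look like, assuming `readonly_until` is a layer index and `output_shape` is the number of classes:

from keras.layers import Dense, GlobalAveragePooling2D
from keras.models import Model

def _set_readonly(model, readonly_until):
    # freeze every layer up to the given index so only the tail remains trainable
    for layer in model.layers[:readonly_until]:
        layer.trainable = False
    return model

def _classification(base_model, output_shape):
    # attach a global-pooling + softmax head on top of the convolutional base
    x = GlobalAveragePooling2D()(base_model.output)
    outputs = Dense(output_shape, activation='softmax')(x)
    return Model(inputs=base_model.input, outputs=outputs)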
def get_model():

    num_classes = 10
    input_shape = (MODELS[MODEL]['size'], MODELS[MODEL]['size'], 3)
    #preprocess = imagenet_utils.preprocess_input

    input_image = Input(shape=input_shape)

    if MODEL == "densenet121":
        base_model = densenet.DenseNet121(include_top=False,
                                          pooling=None,
                                          weights='imagenet',
                                          input_shape=input_shape)
    elif MODEL == "densenet169":
        base_model = densenet.DenseNet169(include_top=False,
                                          pooling=None,
                                          weights='imagenet',
                                          input_shape=input_shape)
    elif MODEL == "densenet201":
        base_model = densenet.DenseNet201(include_top=False,
                                          pooling=None,
                                          weights='imagenet',
                                          input_shape=input_shape)
    elif MODEL == "inceptionresnet":
        base_model = inception_resnet_v2.InceptionResNetV2(
            include_top=False,
            pooling=None,
            weights='imagenet',
            input_shape=input_shape)
    elif MODEL == "inception":
        base_model = inception_v3.InceptionV3(include_top=False,
                                              pooling=None,
                                              weights='imagenet',
                                              input_shape=input_shape)
    elif MODEL == "mobilenet":
        base_model = mobilenet.MobileNet(include_top=False,
                                         pooling=None,
                                         weights='imagenet',
                                         input_shape=input_shape)
    elif MODEL == "resnet":
        base_model = resnet50.ResNet50(include_top=False,
                                       pooling=None,
                                       weights='imagenet',
                                       input_shape=input_shape)
    elif MODEL == "vgg16":
        base_model = vgg16.VGG16(include_top=False,
                                 pooling=None,
                                 weights='imagenet',
                                 input_shape=input_shape)
    elif MODEL == "vgg19":
        base_model = vgg19.VGG19(include_top=False,
                                 pooling=None,
                                 weights='imagenet',
                                 input_shape=input_shape)
    else:
        print("Bad model type:", MODEL)
        sys.exit(-1)

    x = input_image
    x = base_model(x)
    x = Reshape((-1, ))(x)
    #x = Dropout(rate=?)(x)
    x = Dense(512, activation='relu', name='fc1')(x)
    x = Dropout(0.3, name='dropout_fc1')(x)
    x = Dense(128, activation='relu', name='fc2')(x)
    x = Dropout(0.3, name='dropout_fc2')(x)
    prediction = Dense(num_classes, activation="softmax", name="predictions")(x)

    # this is the model we will train
    my_model = Model(inputs=(input_image), outputs=prediction)

    # compile the model (should be done *after* setting layers to non-trainable)
    opt = optimizers.Adam(lr=1e-4)
    my_model.compile(optimizer=opt,
                     loss='categorical_crossentropy',
                     metrics=['acc'])

    my_model.summary()
    return my_model
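A usage sketch for this builder; the imports and the MODELS lookup table below are assumptions inferred from the names used in the function:

import sys
from keras import optimizers
from keras.layers import Input, Dense, Dropout, Reshape
from keras.models import Model
from keras.applications import (densenet, inception_resnet_v2, inception_v3,
                                mobilenet, resnet50, vgg16, vgg19)

MODEL = 'densenet121'
MODELS = {'densenet121': {'size': 224}}   # hypothetical size table consumed by get_model()

model = get_model()   # builds, compiles, and summarises the classifier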
Example No. 7
def get_models(model_name, NUM_CLASSES):

    #region Normal Inceptionv3,Xception,InceptionResnetV2
    if model_name == 'InceptionV3':
        model = InceptionV3(include_top=True,
                            input_shape=(299, 299, 3),
                            weights=None,
                            classes=NUM_CLASSES)
        IMAGE_SIZE = 299
        BATCH_SIZE_TRAIN = 32
        # BATCH_SIZE_TRAIN = 64  # after adding more GPUs
    elif model_name == 'InceptionV4':
        from LIBS.Neural_Networks.classification import inception_v4
        model = inception_v4.create_inception_v4(nb_classes=NUM_CLASSES,
                                                 load_weights=False)
        IMAGE_SIZE = 299
        BATCH_SIZE_TRAIN = 16
    elif model_name == 'Xception':
        model = Xception(include_top=True,
                         input_shape=(299, 299, 3),
                         weights=None,
                         classes=NUM_CLASSES)
        IMAGE_SIZE = 299
        BATCH_SIZE_TRAIN = 28  # after adding more GPUs
    elif model_name == 'InceptionResNetV2':
        # following the standard Keras implementation, dropout is not used
        # without dropout it converges much faster and training accuracy improves
        model = InceptionResNetV2(include_top=True,
                                  input_shape=(299, 299, 3),
                                  weights=None,
                                  classes=NUM_CLASSES)
        IMAGE_SIZE = 299
        BATCH_SIZE_TRAIN = 32  # after adding more GPUs
    elif model_name == 'MobileNetV2':  #Total params: 2,261,827
        # model = MobileNetV2(include_top=True, input_shape=(256, 256, 3), weights=None, classes=NUM_CLASSES)
        model = MobileNetV2(include_top=True,
                            input_shape=(224, 224, 3),
                            weights=None,
                            classes=NUM_CLASSES)
        IMAGE_SIZE = 224
        BATCH_SIZE_TRAIN = 64
    elif model_name == 'My_Xception':
        model = my_xception.Xception(classes=NUM_CLASSES)

        IMAGE_SIZE = 299
        BATCH_SIZE_TRAIN = 32  # after adding more GPUs
    elif model_name == 'DRN_A18':  #params:20,631,682
        from LIBS.Neural_Networks.classification import my_DRN_A
        model = my_DRN_A.DRN_A_Builder.build_DRN_A_18(input_shape=(256, 256,
                                                                   3),
                                                      num_classes=NUM_CLASSES)

        IMAGE_SIZE = 256
        BATCH_SIZE_TRAIN = 32
    elif model_name == 'DRN_A34':
        from LIBS.Neural_Networks.classification import my_DRN_A_1
        # model = my_DRN_A.DRN_A_Builder.build_DRN_A_34(input_shape=(256, 256, 3), num_classes=NUM_CLASSES)
        model = my_DRN_A_1.DRN_A_Builder.build_DRN_A_34(
            input_shape=(256, 256, 3), num_classes=NUM_CLASSES)

        IMAGE_SIZE = 256
        BATCH_SIZE_TRAIN = 32
    elif model_name == 'DRN_A50':
        from LIBS.Neural_Networks.classification import my_DRN_A
        model = my_DRN_A.DRN_A_Builder.build_DRN_A_50(input_shape=(256, 256,
                                                                   3),
                                                      num_classes=NUM_CLASSES)

        IMAGE_SIZE = 256
        BATCH_SIZE_TRAIN = 32
    elif model_name == 'DRN_A101':
        from LIBS.Neural_Networks.classification import my_DRN_A
        model = my_DRN_A.DRN_A_Builder.build_DRN_A_101(input_shape=(256, 256,
                                                                    3),
                                                       num_classes=NUM_CLASSES)

        IMAGE_SIZE = 256
        BATCH_SIZE_TRAIN = 32
    elif model_name == 'DRN_C26':  #params:20,631,682
        from LIBS.Neural_Networks.classification import my_DRN_C
        model = my_DRN_C.DRN_C_Builder.build_DRN_C_26(input_shape=(224, 224,
                                                                   3),
                                                      num_classes=NUM_CLASSES)

        IMAGE_SIZE = 224
        BATCH_SIZE_TRAIN = 32
    elif model_name == 'DRN_C42':  # params:30,750,978
        from LIBS.Neural_Networks.classification import my_DRN_C
        model = my_DRN_C.DRN_C_Builder.build_DRN_C_42(input_shape=(224, 224,
                                                                   3),
                                                      num_classes=NUM_CLASSES)

        IMAGE_SIZE = 224
        BATCH_SIZE_TRAIN = 32
    elif model_name == 'DPN92':  #DPN92,DPN98,DPN107,DPN137
        from LIBS.Neural_Networks.classification import my_dpn
        model = my_dpn.DPN92(input_shape=(224, 224, 3),
                             weights=None,
                             classes=NUM_CLASSES)
        IMAGE_SIZE = 224
        BATCH_SIZE_TRAIN = 16  # GTX1080 ti 32 exhausted
    elif model_name == 'DPN98':
        from LIBS.Neural_Networks.classification import my_dpn
        model = my_dpn.DPN98(input_shape=(224, 224, 3),
                             weights=None,
                             classes=NUM_CLASSES)
        IMAGE_SIZE = 224
        BATCH_SIZE_TRAIN = 16  # GTX1080 ti 32 exhausted
    elif model_name == 'DPN107':
        from LIBS.Neural_Networks.classification import my_dpn
        model = my_dpn.DPN107(input_shape=(224, 224, 3),
                              weights=None,
                              classes=NUM_CLASSES)
        IMAGE_SIZE = 224
        BATCH_SIZE_TRAIN = 16  # GTX1080 ti 32 exhausted
    elif model_name == 'DPN137':
        from LIBS.Neural_Networks.classification import my_dpn
        model = my_dpn.DPN137(input_shape=(224, 224, 3),
                              weights=None,
                              classes=NUM_CLASSES)
        IMAGE_SIZE = 224
        BATCH_SIZE_TRAIN = 16  # GTX1080 ti 32 exhausted
    elif model_name == 'DenseNet121':  #121,169,201
        model = densenet.DenseNet121(input_shape=(224, 224, 3),
                                     weights=None,
                                     classes=NUM_CLASSES)
        IMAGE_SIZE = 224
        BATCH_SIZE_TRAIN = 32
    elif model_name == 'DenseNet169':
        model = densenet.DenseNet169(input_shape=(224, 224, 3),
                                     weights=None,
                                     classes=NUM_CLASSES)
        IMAGE_SIZE = 224
        BATCH_SIZE_TRAIN = 32
    elif model_name == 'DenseNet201':
        model = densenet.DenseNet201(input_shape=(224, 224, 3),
                                     weights=None,
                                     classes=NUM_CLASSES)
        IMAGE_SIZE = 224
        BATCH_SIZE_TRAIN = 16
    elif model_name == 'NasnetMobile':
        model = nasnet.NASNetMobile(input_shape=(224, 224, 3),
                                    weights=None,
                                    classes=NUM_CLASSES)
        IMAGE_SIZE = 224
        BATCH_SIZE_TRAIN = 32
    elif model_name == 'my_Mnasnet':
        from LIBS.Neural_Networks.classification import my_Mnasnet
        model = my_Mnasnet.MnasNet(input_shape=(224, 224, 3),
                                   classes=NUM_CLASSES)
        IMAGE_SIZE = 224
        BATCH_SIZE_TRAIN = 64
    elif model_name == "Mnasnet":
        from LIBS.Neural_Networks.classification import my_Mnasnet
        model = my_Mnasnet.MnasNet(classes=NUM_CLASSES,
                                   input_shape=(224, 224, 3))
        IMAGE_SIZE = 224
        BATCH_SIZE_TRAIN = 64
    elif model_name == 'NasnetMedium':  #Trainable params: 22,695,624
        model = nasnet.NASNet(
            input_shape=(299, 299, 3),
            penultimate_filters=1920,
            num_blocks=7,
            stem_block_filters=80,  # Large:96, Mobile:32
            classes=NUM_CLASSES,
            default_size=299)
        IMAGE_SIZE = 299
        BATCH_SIZE_TRAIN = 16
    elif model_name == 'NasnetLarge':
        model = nasnet.NASNetLarge(input_shape=(331, 331, 3),
                                   weights=None,
                                   classes=NUM_CLASSES)
        IMAGE_SIZE = 331
        BATCH_SIZE_TRAIN = 8
    #endregion

    #region All kinds of  ResNet, ResNeXt
    # 7*32=224, 256, 288, 320, 352, 384
    elif model_name == 'ResNet50':
        from LIBS.Neural_Networks.classification import my_resnet
        model = my_resnet.ResnetBuilder.build_resnet_50((256, 256, 3),
                                                        NUM_CLASSES)
        IMAGE_SIZE = 256
        BATCH_SIZE_TRAIN = 32
    elif model_name == 'ResNet50_288':
        from LIBS.Neural_Networks.classification import my_resnet
        model = my_resnet.ResnetBuilder.build_resnet_50((288, 288, 3),
                                                        NUM_CLASSES)
        IMAGE_SIZE = 288
        BATCH_SIZE_TRAIN = 32
    elif model_name == 'ResNet101':
        from LIBS.Neural_Networks.classification import my_resnet
        model = my_resnet.ResnetBuilder.build_resnet_101((256, 256, 3),
                                                         NUM_CLASSES)
        IMAGE_SIZE = 256
        BATCH_SIZE_TRAIN = 32
    elif model_name == 'ResNet448':
        from LIBS.Neural_Networks.classification import my_resnet
        # model = my_resnet.ResnetBuilder.build_resnet_mymodel_34_64_5((448, 448, 3), NUM_CLASSES)
        model = my_resnet.ResnetBuilder.build_resnet_448_1((448, 448, 3),
                                                           NUM_CLASSES)
        IMAGE_SIZE = 448
        BATCH_SIZE_TRAIN = 32  # after adding more GPUs
    elif model_name == 'ResNext448':
        from LIBS.Neural_Networks.classification import resNeXt
        model = resNeXt.my_ResNext(input_shape=(448, 448, 3),
                                   classes=NUM_CLASSES)
        IMAGE_SIZE = 448
        BATCH_SIZE_TRAIN = 16  # after adding more GPUs

    #endregion

    #region multi label (Inceptionv3, Xception, InceptionResnetV2)
    elif model_name == 'Multi_label_InceptionV3':
        base_model = InceptionV3(include_top=False, weights=None)
        model = add_multilabels_top(base_model, NUM_CLASSES)

        IMAGE_SIZE = 299
        BATCH_SIZE_TRAIN = 32
    elif model_name == 'Multi_label_SE_InceptionV3':
        from LIBS.Neural_Networks.classification import my_se_inception_v3
        base_model = my_se_inception_v3.SE_InceptionV3((299, 299, 3),
                                                       include_top=False)
        model = add_multilabels_top(base_model, NUM_CLASSES)

        IMAGE_SIZE = 299
        BATCH_SIZE_TRAIN = 32
    elif model_name == 'Multi_label_Xception':
        base_model = Xception(include_top=False, weights=None)
        model = add_multilabels_top(base_model, NUM_CLASSES)

        IMAGE_SIZE = 299
        BATCH_SIZE_TRAIN = 32  # after adding more GPUs
    elif model_name == 'Multi_label_my_Xception':
        from LIBS.Neural_Networks.classification import my_xception
        model = my_xception.Xception(classes=NUM_CLASSES, multi_labels=True)

        IMAGE_SIZE = 299
        BATCH_SIZE_TRAIN = 32  # after adding more GPUs

    elif model_name == 'Multi_label_InceptionResNetV2':
        base_model = InceptionResNetV2(include_top=False, weights=None)
        model = add_multilabels_top(base_model, NUM_CLASSES)

        IMAGE_SIZE = 299
        BATCH_SIZE_TRAIN = 32

    elif model_name == 'Multi_label_DRN_A_Xception':
        from LIBS.Neural_Networks import DRN_A_Xception_Builder
        base_model = DRN_A_Xception_Builder.build_DRN_A_xception(
            input_shape=(288, 288, 3), num_classes=29, include_top=False)

        model = add_multilabels_top(base_model, NUM_CLASSES)
        IMAGE_SIZE = 288
        BATCH_SIZE_TRAIN = 32

    elif model_name == 'Multi_NasnetMedium':
        base_model = nasnet.NASNet(
            input_shape=(299, 299, 3),
            penultimate_filters=1920,
            num_blocks=7,
            stem_block_filters=80,  # Large:96, Mobile:32
            classes=NUM_CLASSES,
            default_size=299,
            include_top=False)
        model = add_multilabels_top(base_model, NUM_CLASSES)

        IMAGE_SIZE = 299
        BATCH_SIZE_TRAIN = 16
    elif model_name == 'Multi_DRN_A18':
        from LIBS.Neural_Networks.classification import my_DRN_A
        #original number of parameters, resnet34 :21.8M, resnet50:25.6M
        base_model = my_DRN_A.DRN_A_Builder.build_DRN_A_18(
            input_shape=(288, 288, 3),
            num_classes=NUM_CLASSES,
            include_top=False)

        model = add_multilabels_top(base_model, NUM_CLASSES)
        IMAGE_SIZE = 288
        BATCH_SIZE_TRAIN = 32
    elif model_name == 'Multi_DRN_A34':
        from LIBS.Neural_Networks.classification import my_DRN_A
        base_model = my_DRN_A.DRN_A_Builder.build_DRN_A_34(
            input_shape=(288, 288, 3),
            num_classes=NUM_CLASSES,
            include_top=False)

        model = add_multilabels_top(base_model, NUM_CLASSES)
        IMAGE_SIZE = 288
        BATCH_SIZE_TRAIN = 32
    elif model_name == 'Multi_DRN_A50':
        from LIBS.Neural_Networks.classification import my_DRN_A
        base_model = my_DRN_A.DRN_A_Builder.build_DRN_A_50(
            input_shape=(288, 288, 3),
            num_classes=NUM_CLASSES,
            include_top=False)

        model = add_multilabels_top(base_model, NUM_CLASSES)
        IMAGE_SIZE = 288
        BATCH_SIZE_TRAIN = 32
    elif model_name == 'Multi_DRN_C26':  #params:20,631,682
        from LIBS.Neural_Networks.classification import my_DRN_C
        base_model = my_DRN_C.DRN_C_Builder.build_DRN_C_26(
            input_shape=(288, 288, 3),
            num_classes=NUM_CLASSES,
            include_top=False)

        model = add_multilabels_top(base_model, NUM_CLASSES)
        IMAGE_SIZE = 288
        BATCH_SIZE_TRAIN = 32
    elif model_name == 'Multi_DRN_C42':  # params:30,750,978
        from LIBS.Neural_Networks.classification import my_DRN_C
        base_model = my_DRN_C.DRN_C_Builder.build_DRN_C_42(
            input_shape=(288, 288, 3),
            num_classes=NUM_CLASSES,
            include_top=False)

        model = add_multilabels_top(base_model, NUM_CLASSES)
        IMAGE_SIZE = 288
        BATCH_SIZE_TRAIN = 32

    elif model_name == 'Multi_DRN_C58':
        from LIBS.Neural_Networks.classification import my_DRN_C
        base_model = my_DRN_C.DRN_C_Builder.build_DRN_C_58(
            input_shape=(288, 288, 3),
            num_classes=NUM_CLASSES,
            include_top=False)

        model = add_multilabels_top(base_model, NUM_CLASSES)
        IMAGE_SIZE = 288
        BATCH_SIZE_TRAIN = 32
    # endregion

    #region SE Net (Se_InceptionV3, Se_InceptionResNetV2, etc.)
    elif model_name == 'Se_InceptionV3':
        from LIBS.Neural_Networks.classification import my_se_inception_v3
        model = my_se_inception_v3.SE_InceptionV3((299, 299, 3),
                                                  classes=NUM_CLASSES)
        IMAGE_SIZE = 299
        BATCH_SIZE_TRAIN = 48

    elif model_name == 'Se_InceptionResNetV2':
        from LIBS.Neural_Networks.classification import my_se_inception_resnet_v2
        model = my_se_inception_resnet_v2.SE_InceptionResNetV2(
            (299, 299, 3), classes=NUM_CLASSES)
        IMAGE_SIZE = 299
        BATCH_SIZE_TRAIN = 32
    elif model_name == 'Se_Resnext':
        from LIBS.Neural_Networks import my_se_resnext
        model = my_se_resnext.SEResNextImageNet((299, 299, 3),
                                                classes=NUM_CLASSES)
        IMAGE_SIZE = 299
        BATCH_SIZE_TRAIN = 24
    elif model_name == 'Se_Resnet50':
        from LIBS.Neural_Networks import my_se_resnet
        model = my_se_resnet.SEResNet50((299, 299, 3), classes=NUM_CLASSES)
        IMAGE_SIZE = 299
        BATCH_SIZE_TRAIN = 32
    #endregion
    '''other models
    #region multi label SE_NET
    elif model_name == 'Multi_label_Se_InceptionV3':
        from Neural_Networks import my_se_inception_v3
        base_model = my_se_inception_v3.SE_InceptionV3((299, 299, 3), include_top=False,
                                                       classes=29, weights=None)

        x = base_model.output
        from keras.layers import GlobalAveragePooling2D, Dense
        from keras.models import Model
        x = GlobalAveragePooling2D()(x)
        predictions = Dense(NUM_CLASSES, activation='sigmoid')(x)

        model = Model(inputs=base_model.input, outputs=predictions)
        IMAGE_SIZE = 299
        BATCH_SIZE_TRAIN = 48
    elif model_name == 'Multi_label_Se_InceptionResNetV2':
        from Neural_Networks import my_se_inception_resnet_v2
        base_model = my_se_inception_resnet_v2.SE_InceptionResNetV2((299, 299, 3), include_top=False,
                                                        classes=29, weights=None)

        x = base_model.output
        from keras.layers import GlobalAveragePooling2D, Dense
        from keras.models import Model
        x = GlobalAveragePooling2D()(x)
        predictions = Dense(NUM_CLASSES, activation='sigmoid')(x)

        model = Model(inputs=base_model.input, outputs=predictions)
        IMAGE_SIZE = 299
        BATCH_SIZE_TRAIN = 24
        BATCH_SIZE_TRAIN = 32  # after adding more GPUs

    #endregion

    '''

    return model, IMAGE_SIZE, BATCH_SIZE_TRAIN
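A usage sketch for the returned triple, assuming the backbone imports used inside get_models (InceptionV3 and so on) are available at module level; the optimizer settings are illustrative only:

from keras import optimizers

model, IMAGE_SIZE, BATCH_SIZE_TRAIN = get_models('InceptionV3', NUM_CLASSES=5)
model.compile(optimizer=optimizers.Adam(lr=1e-4),      # illustrative learning rate
              loss='categorical_crossentropy',
              metrics=['acc'])
# IMAGE_SIZE sizes the data generators; BATCH_SIZE_TRAIN sets the training batch size.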
Example No. 8
 def __init__(self, model_name, transfer=False, input_shape=None):
     '''
     Load a pre-trained model.

     model_name: one of the available model names.
     transfer: whether to use transfer learning. If True, the fully-connected
         layers at the top of the network are excluded. Default: False.
     input_shape: input shape for transfer learning. If None, the model's default
         input shape is used. Default: None. https://keras.io/applications/
     '''
     self.name = model_name
     # vgg16
     if model_name == 'vgg16':
         import keras.applications.vgg16 as vgg16
         self.lib = vgg16
         if transfer:
             if input_shape == None:
                 self.input_size = (224, 224)
                 input_shape = (self.input_size[0], self.input_size[1], 3)
             else:
                 self.input_size = (input_shape[0], input_shape[1])
             self.model = vgg16.VGG16(include_top=(not transfer), input_shape=input_shape)
         else:
             self.input_size = (224, 224)
             self.model = vgg16.VGG16(include_top=(not transfer))
     # vgg 19
     elif model_name == 'vgg19':
         import keras.applications.vgg19 as vgg19
         self.lib = vgg19
         if transfer:
             if input_shape == None:
                 self.input_size = (224, 224)
                 input_shape = (self.input_size[0], self.input_size[1], 3)
             else:
                 self.input_size = (input_shape[0], input_shape[1])
             self.model = vgg19.VGG19(include_top=(not transfer), input_shape=input_shape)
         else:
             self.input_size = (224, 224)
             self.model = vgg19.VGG19(include_top=(not transfer))
     # resnet50
     elif model_name == 'resnet50':
         import keras.applications.resnet50 as resnet50
         self.lib = resnet50
         if transfer:
             if input_shape == None:
                 self.input_size = (224, 224)
                 input_shape = (self.input_size[0], self.input_size[1], 3)
             else:
                 self.input_size = (input_shape[0], input_shape[1])
             self.model = resnet50.ResNet50(include_top=(not transfer), input_shape=input_shape)
         else:
             self.input_size = (224, 224)
             self.model = resnet50.ResNet50(include_top=(not transfer))
     # xception
     elif model_name == 'xception':
         import keras.applications.xception as xception
         self.lib = xception
         if transfer:
             if input_shape == None:
                 self.input_size = (299, 299)
                 input_shape = (self.input_size[0], self.input_size[1], 3)
             else:
                 self.input_size = (input_shape[0], input_shape[1])
             self.model = xception.Xception(include_top=(not transfer), input_shape=input_shape)
         else:
             self.input_size = (299, 299)
             self.model = xception.Xception(include_top=(not transfer))
     # densenet121
     elif model_name == 'densenet121':
         import keras.applications.densenet as densenet
         self.lib = densenet
         if transfer:
             if input_shape == None:
                 self.input_size = (224, 224)
                 input_shape = (self.input_size[0], self.input_size[1], 3)
             else:
                 self.input_size = (input_shape[0], input_shape[1])
             self.model = densenet.DenseNet121(include_top=(not transfer), input_shape=input_shape)
         else:
             self.input_size = (224, 224)
             self.model = densenet.DenseNet121(include_top=(not transfer))
     # densenet169
     elif model_name == 'densenet169':
         import keras.applications.densenet as densenet
         self.lib = densenet
         if transfer:
             if input_shape == None:
                 self.input_size = (224, 224)
                 input_shape = (self.input_size[0], self.input_size[1], 3)
             else:
                 self.input_size = (input_shape[0], input_shape[1])
             self.model = densenet.DenseNet169(include_top=(not transfer), input_shape=input_shape)
         else:
             self.input_size = (224, 224)
             self.model = densenet.DenseNet169(include_top=(not transfer))
     # densenet201
     elif model_name == 'densenet201':
         import keras.applications.densenet as densenet
         self.lib = densenet
         if transfer:
             if input_shape == None:
                 self.input_size = (224, 224)
                 input_shape = (self.input_size[0], self.input_size[1], 3)
             else:
                 self.input_size = (input_shape[0], input_shape[1])
             self.model = densenet.DenseNet201(include_top=(not transfer), input_shape=input_shape)
         else:
             self.input_size = (224, 224)
             self.model = densenet.DenseNet201(include_top=(not transfer))
     # inceptionResnetV2
     elif model_name == 'inception_resnet_v2':
         import keras.applications.inception_resnet_v2 as inception_resnet_v2
         self.lib = inception_resnet_v2
         if transfer:
             if input_shape == None:
                 self.input_size = (299, 299)
                 input_shape = (self.input_size[0], self.input_size[1], 3)
             else:
                 self.input_size = (input_shape[0], input_shape[1])
             self.model = self.lib.InceptionResNetV2(include_top=(not transfer), input_shape=input_shape)
         else:
             self.input_size = (299, 299)
             self.model = self.lib.InceptionResNetV2(include_top=(not transfer))
     # inceptionV3
     elif model_name == 'inception_v3':
         import keras.applications.inception_v3 as inception_v3
         self.lib = inception_v3
         if transfer:
             if input_shape == None:
                 self.input_size = (299, 299)
                 input_shape = (self.input_size[0], self.input_size[1], 3)
             else:
                 self.input_size = (input_shape[0], input_shape[1])
             self.model = self.lib.InceptionV3(include_top=(not transfer), input_shape=input_shape)
         else:
             self.input_size = (299, 299)
             self.model = self.lib.InceptionV3(include_top=(not transfer))
     # nasnet mobile
     elif model_name == 'nasnet_mobile':
         import keras.applications.nasnet as nasnet
         self.lib = nasnet
         if transfer:
             if input_shape == None:
                 self.input_size = (224, 224)
                 input_shape = (self.input_size[0], self.input_size[1], 3)
             else:
                 self.input_size = (input_shape[0], input_shape[1])
             self.model = self.lib.NASNetMobile(include_top=(not transfer), input_shape=input_shape)
         else:
             self.input_size = (224, 224)
             self.model = self.lib.NASNetMobile(include_top=(not transfer))
     # nasnet large
     elif model_name == 'nasnet_large':
         import keras.applications.nasnet as nasnet
         self.lib = nasnet
         if transfer:
             if input_shape == None:
                 self.input_size = (331, 331)
                 input_shape = (self.input_size[0], self.input_size[1], 3)
             else:
                 self.input_size = (input_shape[0], input_shape[1])
             self.model = self.lib.NASNetLarge(include_top=(not transfer), input_shape=input_shape)
         else:
             self.input_size = (331, 331)
             self.model = self.lib.NASNetLarge(include_top=(not transfer))
     # mobilenet
     elif model_name == 'mobilenet':
         import keras.applications.mobilenet as mobilenet
         self.lib = mobilenet
         if transfer:
             if input_shape == None:
                 self.input_size = (224, 224)
                 input_shape = (self.input_size[0], self.input_size[1], 3)
             else:
                 self.input_size = (input_shape[0], input_shape[1])
             self.model = self.lib.MobileNet(include_top=(not transfer), input_shape=input_shape)
         else:
             self.input_size = (224, 224)
             self.model = self.lib.MobileNet(include_top=(not transfer))
     # mobilenet v2
     elif model_name == 'mobilenet_v2':
         import keras.applications.mobilenet_v2 as mobilenet_v2
         self.lib = mobilenet_v2
         if transfer:
             if input_shape == None:
                 self.input_size = (224, 224)
                 input_shape = (self.input_size[0], self.input_size[1], 3)
             else:
                 self.input_size = (input_shape[0], input_shape[1])
             self.model = self.lib.MobileNetV2(include_top=(not transfer), input_shape=input_shape)
         else:
             self.input_size = (224, 224)
             self.model = self.lib.MobileNetV2(include_top=(not transfer))
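A usage sketch, assuming this __init__ belongs to a wrapper class (named PretrainedModel here purely for illustration):

# full ImageNet classifier at the default input size
clf = PretrainedModel('densenet201')
print(clf.input_size)            # (224, 224)

# convolutional base only, for transfer learning at a custom resolution
base = PretrainedModel('densenet201', transfer=True, input_shape=(256, 256, 3))
features = base.model.output     # feature map on which a new head can be built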
Example No. 9
def save_bottlebeck_features(model_name):

    if model_name=='resnet50':
        model = resnet50.ResNet50(weights='imagenet', include_top=False, pooling='avg')

        # 2048 dimensional features
        # pooling: 1) None: output is 16x16x2048, 2) avg: 1x1x2048, 3) max: 1x1x2048
        #base_model=resnet50.ResNet50(input_shape=(img_height,img_width,3),weights='imagenet', include_top=False)
        #model = Model(inputs=base_model.input, outputs=base_model.get_layer('activation_25').output)
    elif model_name=='nasnet_large':
        model=nasnet.NASNetLarge(input_shape=(img_height,img_width,3),weights='imagenet', include_top=False, pooling='avg')
        #4032 dimensional features
    elif model_name=='xception':
        model=xception.Xception(input_shape=(img_height,img_width,3),weights='imagenet', include_top=False, pooling='avg')

        #2048 dimensional features
    elif model_name=='inceptionv3':
        model=inception_v3.InceptionV3(input_shape=(img_height,img_width,3),weights='imagenet', include_top=False, pooling='avg')

        #2048 dimensional features
    elif model_name=='inceptionresnetv2':
        model=inception_resnet_v2.InceptionResNetV2(input_shape=(img_height,img_width,3),weights='imagenet', include_top=False, pooling='avg')
        #1536 dimensional features
    elif model_name=='densenet':
        model=densenet.DenseNet201(input_shape=(img_height,img_width,3),weights='imagenet', include_top=False, pooling='avg')
        # 1920 dimensional features
    else:
        model=vgg19.VGG19(weights='imagenet', include_top=False, pooling='avg')
        # 512 dimensional features
        #base_model=vgg19.VGG19(input_shape=(img_height,img_width,3),weights='imagenet', include_top=False)
        #model=Model(inputs=base_model.input,outputs=base_model.get_layer('block4_pool').output)

    images = os.listdir(wsi_dir)
    for image_name in images:
        if '.svs' in image_name:
            patient_id=image_name[0:23]
            image_features = []
            image_names = []
            #patient_id='TCGA-2F-A9KT'
            #patches=os.listdir(train_data_dir[ind]+patient_id+'*.png')
            patches=glob.glob(data_dir+patient_id+'*.png')

            for patch_name in patches:
                patch_split=patch_name.split("\\")
                img = image.load_img(patch_name, target_size=(img_height,img_width))
                # convert image to numpy array
                x = image.img_to_array(img)

                # the image is now in an array of shape (224, 224, 3)
                # need to expand it to (1, 224, 224, 3) because the model expects a batch of images
                x = np.expand_dims(x, axis=0)
                #imshow(np.uint8(x[0,:,:,:]))

                if model_name=='resnet50':
                    x = resnet50.preprocess_input(x)
                elif model_name=='nasnet_large':
                    x = nasnet.preprocess_input(x)
                elif model_name == 'xception':
                    x = xception.preprocess_input(x)
                elif model_name=='inceptionv3':
                    x=inception_v3.preprocess_input(x)
                elif model_name == 'inceptionresnetv2':
                    x=inception_resnet_v2.preprocess_input(x)
                elif model_name=='densenet':
                    x=densenet.preprocess_input(x)
                else:
                    x=vgg19.preprocess_input(x)

                # extract the features
                features = model.predict(x)[0]
                #features=np.mean(features,axis=(0,1))

                image_features.append(features)
                image_names.append(patch_split[1])

            if save_features==True:
                scipy.io.savemat('./step2_output/'+patient_id+'_feat.mat', mdict={'image_features': image_features, 'image_names':image_names})
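This function depends on module-level imports and configuration that are not shown; a plausible setup, inferred from the names used (the directory paths are placeholders):

import os
import glob
import numpy as np
import scipy.io
from keras.preprocessing import image
from keras.applications import (densenet, inception_resnet_v2, inception_v3,
                                nasnet, resnet50, vgg19, xception)

img_height, img_width = 224, 224            # nasnet_large would need 331x331 instead
wsi_dir = '/path/to/whole_slide_images/'    # placeholder
data_dir = '/path/to/extracted_patches/'    # placeholder
save_features = True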
Example No. 10
def get_model_and_data(mode, model_name):
    if model_name == 'vgg16':
        model = vgg16.VGG16(weights='imagenet', include_top=True)
        preprocess_func = partial(vgg16.preprocess_input, mode='tf')
        target_size = (224, 224)

    elif model_name == 'vgg19':
        model = vgg19.VGG19(weights='imagenet', include_top=True)
        preprocess_func = partial(vgg19.preprocess_input, mode='tf')
        target_size = (224, 224)

    elif model_name == 'resnet50':
        model = resnet50.ResNet50(weights='imagenet', include_top=True)
        preprocess_func = partial(resnet50.preprocess_input, mode='tf')
        target_size = (224, 224)

    elif model_name == 'inception_v3':
        model = inception_v3.InceptionV3(weights='imagenet', include_top=True)
        preprocess_func = inception_v3.preprocess_input
        target_size = (299, 299)

    elif model_name == 'inception_resnet_v2':
        model = inception_resnet_v2.InceptionResNetV2(weights='imagenet',
                                                      include_top=True)
        preprocess_func = inception_resnet_v2.preprocess_input
        target_size = (299, 299)

    elif model_name == 'xception':
        model = xception.Xception(weights='imagenet', include_top=True)
        preprocess_func = xception.preprocess_input
        target_size = (299, 299)

    elif model_name == 'mobilenet':
        model = mobilenet.MobileNet(weights='imagenet', include_top=True)
        preprocess_func = mobilenet.preprocess_input
        target_size = (224, 224)

    elif model_name.startswith('densenet'):
        model_type = int(model_name[len('densenet'):])

        if model_type == 121:
            model = densenet.DenseNet121(weights='imagenet', include_top=True)
        elif model_type == 169:
            model = densenet.DenseNet169(weights='imagenet', include_top=True)
        elif model_type == 201:
            model = densenet.DenseNet201(weights='imagenet', include_top=True)
        else:
            raise ValueError(
                f'Got incorrect DenseNet model type ({model_type}).')

        preprocess_func = densenet.preprocess_input
        target_size = (224, 224)

    else:
        raise ValueError(f'Got unknown NN model ({model_name}).')

    if mode == 'train':
        input_dir = cfg.TRAIN_IMAGES_DIR

    elif mode == 'test':
        input_dir = cfg.TEST_IMAGES_DIR
    else:
        raise ValueError(f'Got unknown proc mode ({mode}).')

    output_file = cfg.NN_IMAGE_FEATURES[model_name][mode]['memmap']

    return model, input_dir, output_file, preprocess_func, target_size
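A usage sketch for the returned values, assuming the cfg module referenced above is importable; the single-image feature extraction below is an illustration, not part of the original example:

import numpy as np
from keras.preprocessing import image

model, input_dir, output_file, preprocess_func, target_size = \
    get_model_and_data('train', 'densenet201')

img = image.load_img(some_image_path, target_size=target_size)   # some_image_path: placeholder
x = preprocess_func(np.expand_dims(image.img_to_array(img), axis=0))
features = model.predict(x)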
Example No. 11
    def InitModel(self, base_model):
        self.base_model = base_model
        if self.base_model == 'VGG16':
            self.conv_base = VGG16(include_top=False,
                                   input_shape=(self.img_high, self.img_width, 3))
            self.trainable_layers = ['block5_conv1', 'block5_conv2', 'block5_conv3','block4_conv3','block4_conv2',
                                     'block4_conv2','block3_conv3','block3_conv2','block3_conv1']
            #self.trainable_layers = ['block5_conv1', 'block5_conv2','block5_conv3']
        elif self.base_model == 'VGG19':
            self.conv_base = VGG19(weights='imagenet', include_top=False,
                                   input_shape=(self.img_high, self.img_width, 3))
            self.trainable_layers = ['block5_conv1', 'block5_conv2', 'block5_conv3', 'block5_conv4']
        elif self.base_model == 'ResNet50':
            self.conv_base = resnet50.ResNet50(weights='imagenet', include_top=False,
                                               input_shape=(self.img_high, self.img_width, 3))
            self.trainable_layers = ['res5c_branch2c', 'res5c_branch2b', 'res5c_branch2a']
        elif self.base_model == 'DenseNet121':
            self.conv_base = densenet.DenseNet121(weights='imagenet', include_top=False,
                                                  input_shape=(self.img_high, self.img_width, 3))
            self.trainable_layers = ['conv5_block16_2_conv', 'conv5_block16_1_conv', 'conv5_block15_2_conv',
                                'conv5_block15_1_conv']
        elif self.base_model == 'DenseNet201':
            self.conv_base = densenet.DenseNet201(weights='imagenet', include_top=False,
                                                  input_shape=(self.img_high, self.img_width, 3))
            self.trainable_layers = ['conv5_block32_2_conv', 'conv5_block32_1_conv', 'conv5_block31_2_conv',
                                'conv5_block31_1_conv']
        elif self.base_model == 'MobileNet':
            self.conv_base = mobilenet.MobileNet(weights='imagenet', include_top=False,
                                                 input_shape=(self.img_high, self.img_width, 3))
            self.trainable_layers = []  # keep the whole MobileNet base frozen
        elif self.base_model == 'InceptionV3':
            self.conv_base = inception_v3.InceptionV3(weights='imagenet', include_top=False,
                                                      input_shape=(self.img_high, self.img_width, 3))
            self.trainable_layers = ['conv2d_86', 'conv2d_94', 'conv2d_89', 'conv2d_87', 'conv2d_88', 'conv2d_93','conv2d_92']
            #self.trainable_layers = ['conv2d_94']
        elif self.base_model == 'Xception':
            self.conv_base = xception.Xception(weights='imagenet', include_top=False,
                                               input_shape=(self.img_high, self.img_width, 3))
            #self.trainable_layers = ['block14_sepconv2', 'block14_sepconv1']
            self.trainable_layers = ['conv2d_4']
        elif self.base_model == 'InceptionResNetV2':
            self.conv_base = InceptionResNetV2(weights='imagenet', include_top=False,
                                               input_shape=(self.img_high, self.img_width, 3))
            self.trainable_layers = ['conv7b', 'block8_10_conv', 'conv2d_203', 'conv2d_202', 'conv2d_200', 'conv2d_201']

        self.conv_base.summary()
        #plot_model(self.conv_base, os.path.join(self.model_dir, self.base_model + '.pdf'), show_shapes=False)


        
        x = self.conv_base.output  # final feature map of the chosen backbone (same as 'block5_pool' for VGG)
        #x =layers.MaxPool2D(pool_size=(4,4))(x)
        #x = layers.Dropout(0.5)(x)
        x = layers.Flatten(name='flatten_1_'+self.base_model)(x)
        #x = layers.Dense(800, activation='relu')(x)
        x = layers.Dense(1024, activation='relu')(x)
        #x = layers.Dense(512, activation='relu')(x)
        #x = layers.Dense(512, activation='relu')(x)
        x = layers.Dense(self.classes, activation='softmax')(x)
        self.model = models.Model(inputs=self.conv_base.input, outputs=x,name='model_'+self.base_model)


        print(self.model.name)
        print('- Trainable Weights before freezing: {0}'.format(len(self.model.trainable_weights)))

        self.conv_base.trainable = True
        for layer in self.conv_base.layers:
            if (layer.name in self.trainable_layers):
                layer.trainable = True
            else:
                layer.trainable = False

        print('- Trainable Weights after freezing: {0}'.format(len(self.model.trainable_weights)))

        self.model.summary()
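A compile sketch for the model assembled above, assuming InitModel belongs to a trainer class (the class name and learning rate are illustrative only):

from keras import optimizers

trainer = ModelTrainer()          # hypothetical wrapper defining InitModel() and img_high/img_width/classes
trainer.InitModel('VGG16')
trainer.model.compile(optimizer=optimizers.RMSprop(lr=1e-5),   # illustrative learning rate
                      loss='categorical_crossentropy',
                      metrics=['acc'])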
Example No. 12
def get_model_and_data(mode, model_name):
    if model_name == "vgg16":
        model = vgg16.VGG16(weights="imagenet", include_top=True)
        preprocess_func = partial(vgg16.preprocess_input, mode="tf")
        target_size = (224, 224)

    elif model_name == "vgg19":
        model = vgg19.VGG19(weights="imagenet", include_top=True)
        preprocess_func = partial(vgg19.preprocess_input, mode="tf")
        target_size = (224, 224)

    elif model_name == "resnet50":
        model = resnet50.ResNet50(weights="imagenet", include_top=True)
        preprocess_func = partial(resnet50.preprocess_input, mode="tf")
        target_size = (224, 224)

    elif model_name == "inception_v3":
        model = inception_v3.InceptionV3(weights="imagenet", include_top=True)
        preprocess_func = inception_v3.preprocess_input
        target_size = (299, 299)

    elif model_name == "inception_resnet_v2":
        model = inception_resnet_v2.InceptionResNetV2(weights="imagenet",
                                                      include_top=True)
        preprocess_func = inception_resnet_v2.preprocess_input
        target_size = (299, 299)

    elif model_name == "xception":
        model = xception.Xception(weights="imagenet", include_top=True)
        preprocess_func = xception.preprocess_input
        target_size = (299, 299)

    elif model_name == "mobilenet":
        model = mobilenet.MobileNet(weights="imagenet", include_top=True)
        preprocess_func = mobilenet.preprocess_input
        target_size = (224, 224)

    elif model_name.startswith("densenet"):
        model_type = int(model_name[len("densenet"):])

        if model_type == 121:
            model = densenet.DenseNet121(weights="imagenet", include_top=True)
        elif model_type == 169:
            model = densenet.DenseNet169(weights="imagenet", include_top=True)
        elif model_type == 201:
            model = densenet.DenseNet201(weights="imagenet", include_top=True)
        else:
            raise ValueError(
                f"Got incorrect DenseNet model type ({model_type}).")

        preprocess_func = densenet.preprocess_input
        target_size = (224, 224)

    else:
        raise ValueError(f"Got unknown NN model ({model_name}).")

    if mode == "train":
        input_dir = cfg.TRAIN_IMAGES_DIR

    elif mode == "test":
        input_dir = cfg.TEST_IMAGES_DIR
    else:
        raise ValueError(f"Got unknown proc mode ({mode}).")

    output_file = cfg.NN_IMAGE_FEATURES[model_name][mode]["memmap"]

    return model, input_dir, output_file, preprocess_func, target_size
    cnn_network = inception_v3.InceptionV3(include_top=False,
                                           weights='imagenet',
                                           input_shape=input_shape)
    print("InceptionV3 model selected.")

if (network == 5):
    from keras.applications import xception
    cnn_network = xception.Xception(include_top=False,
                                    weights='imagenet',
                                    input_shape=input_shape)
    print("Xception model selected.")

if (network == 6):
    from keras.applications import densenet
    cnn_network = densenet.DenseNet201(include_top=False,
                                       weights='imagenet',
                                       input_shape=input_shape)
    print("DenseNet model selected.")

output = cnn_network.layers[-1].output
output = keras.layers.Flatten()(output)
cnn_model = Model(cnn_network.input, output)
print("Step15 completed")

print(type(index))
if (index == 1):
    print("Step16 started")
    print("Processing Transfer Learning by Feature Extraction methodology")
    cnn_model.trainable = False
    for layer in cnn_model.layers:
        layer.trainable = False
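With the backbone frozen, feature-extraction transfer learning usually continues by stacking a small trainable head on top of cnn_model; a minimal sketch (layer sizes are illustrative, num_classes is assumed to be defined elsewhere):

from keras.models import Sequential
from keras.layers import Dense, Dropout

model = Sequential()
model.add(cnn_model)                                   # frozen convolutional feature extractor
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(num_classes, activation='softmax'))    # num_classes: assumed defined elsewhere
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])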
def save_bottlebeck_features(model_name):

    if model_name == 'resnet50':
        model = resnet50.ResNet50(weights='imagenet',
                                  include_top=False,
                                  pooling='avg')
        feat_output = [
            'E:/Hongming/projects/tcga-bladder-mutationburden/feature_output/P_CN_20X/2)resnet50/',
            'E:/Hongming/projects/tcga-bladder-mutationburden/feature_output/P_CN_20X/2)resnet50/'
        ]

        # 2048 dimensional features
        # pooling: 1) None: output is 16x16x2048, 2) avg: 1x1x2048, 3) max: 1x1x2048
        #base_model=resnet50.ResNet50(input_shape=(img_height,img_width,3),weights='imagenet', include_top=False)
        #model = Model(inputs=base_model.input, outputs=base_model.get_layer('activation_25').output)
    elif model_name == 'nasnet_large':
        model = nasnet.NASNetLarge(input_shape=(img_height, img_width, 3),
                                   weights='imagenet',
                                   include_top=False,
                                   pooling='avg')
        #4032 dimensional features
    elif model_name == 'xception':
        model = xception.Xception(input_shape=(img_height, img_width, 3),
                                  weights='imagenet',
                                  include_top=False,
                                  pooling='avg')
        feat_output = [
            'E:/Hongming/projects/tcga-bladder-mutationburden/feature_output/P_CN_20X/4)xception/',
            'E:/Hongming/projects/tcga-bladder-mutationburden/feature_output/P_CN_20X/4)xception/'
        ]

        #2048 dimensional features
    elif model_name == 'inceptionv3':
        model = inception_v3.InceptionV3(input_shape=(img_height, img_width,
                                                      3),
                                         weights='imagenet',
                                         include_top=False,
                                         pooling='avg')
        feat_output = [
            'E:/Hongming/projects/tcga-bladder-mutationburden/feature_output/P_CN_20X/5)inceptionv3/',
            'E:/Hongming/projects/tcga-bladder-mutationburden/feature_output/P_CN_20X/5)inceptionv3/'
        ]
        #2048 dimensional features
    elif model_name == 'inceptionresnetv2':
        model = inception_resnet_v2.InceptionResNetV2(input_shape=(img_height,
                                                                   img_width,
                                                                   3),
                                                      weights='imagenet',
                                                      include_top=False,
                                                      pooling='avg')
        #1536 dimensional features
    elif model_name == 'densenet':
        model = densenet.DenseNet201(input_shape=(img_height, img_width, 3),
                                     weights='imagenet',
                                     include_top=False,
                                     pooling='avg')
        # 1920 dimensional features
    else:
        model = vgg19.VGG19(weights='imagenet',
                            include_top=False,
                            pooling='avg')
        # 512 dimensional features
        #base_model=vgg19.VGG19(input_shape=(img_height,img_width,3),weights='imagenet', include_top=False)
        #model=Model(inputs=base_model.input,outputs=base_model.get_layer('block4_pool').output)

    #for i,layer in enumerate(model.layers):
    #    print(i,layer.name)

    #print(model.summary())

    for ind in range(1, len(img_path)):
        path_ind = img_path[ind]
        #path_split=path_ind.split("/")
        images = os.listdir(path_ind)
        for image_name in images:
            if '.svs' in image_name:
                patient_id = image_name[0:23]
                image_features = []
                image_names = []

                ss = time.time()
                #patient_id = 'TCGA-2F-A9KO'  # debug override left disabled so each slide keeps its own id
                #patches=os.listdir(train_data_dir[ind]+patient_id+'*.png')
                patches = glob.glob(train_data_dir[0] + patient_id + '*.png')

                for patch_name in patches:
                    patch_split = patch_name.split("\\")
                    img = image.load_img(patch_name,
                                         target_size=(img_height, img_width))
                    # convert image to numpy array
                    x = image.img_to_array(img)

                    # the image is now in an array of shape (224, 224, 3)
                    # need to expand it to (1, 224, 224, 3) because the model expects a batch of images
                    x = np.expand_dims(x, axis=0)
                    #imshow(np.uint8(x[0,:,:,:]))

                    if model_name == 'resnet50':
                        x = resnet50.preprocess_input(x)
                    elif model_name == 'nasnet_large':
                        x = nasnet.preprocess_input(x)
                    elif model_name == 'xception':
                        x = xception.preprocess_input(x)
                    elif model_name == 'inceptionv3':
                        x = inception_v3.preprocess_input(x)
                    elif model_name == 'inceptionresnetv2':
                        x = inception_resnet_v2.preprocess_input(x)
                    elif model_name == 'densenet':
                        x = densenet.preprocess_input(x)
                    else:
                        x = vgg19.preprocess_input(x)

                    # extract the features
                    features = model.predict(x)[0]
                    #features=np.mean(features,axis=(0,1))

                    image_features.append(features)
                    image_names.append(patch_split[1])

                se = time.time() - ss
                print(se)
                if save_features == True:
                    scipy.io.savemat(feat_output[ind] + patient_id +
                                     '_feat.mat',
                                     mdict={
                                         'image_features': image_features,
                                         'image_names': image_names
                                     })
Example No. 15
print('image_test shape:', image_test.shape)
print(image_test.shape[0], 'test samples')

base_model = densenet.DenseNet169(weights='imagenet', input_shape=input_shape, include_top=False)
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dropout(0.5)(x)
x = Dense(1024, activation='relu')(x)
x = Dropout(0.5)(x)
predictions = Dense(12, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)

model.load_weights("DenseNet169.hdf5")
predict_results = model.predict(image_test)

base_model3 = densenet.DenseNet201(weights='imagenet', input_shape=input_shape, include_top=False)
x3 = base_model3.output
x3 = GlobalAveragePooling2D()(x3)
x3 = Dropout(0.5)(x3)
x3 = Dense(1024, activation='relu')(x3)
x3 = Dropout(0.5)(x3)
predictions3 = Dense(12, activation='softmax')(x3)
model3 = Model(inputs=base_model3.input, outputs=predictions3)

model3.load_weights("DenseNet201.hdf5")
predict_results3 = model3.predict(image_test)

predict_results += predict_results3

writer = csv.writer(open("predictLog_DenseNet201_plus_DenseNet169_gen.csv", "w"), lineterminator='\n')
writer.writerow(['filename','Black-grass', 'Charlock', 'Cleavers', 'Common Chickweed', 'Common wheat', 'Fat Hen', 'Loose Silky-bent', 'Maize', 'Scentless Mayweed', 'Shepherds Purse',