Example #1
def getCNN_Model(use_vgg16=use_vgg16):
    if use_vgg16:
        modelID = 'VGG16'
        inp = (224, 224, 3)
        modelPackage = vgg16
        margins = (8, 8, 48, 48)
        Target_Frame_Shape = (240, 320, 3)
        cnn_model = vgg16.VGG16(weights='imagenet',
                                input_shape=inp,
                                include_top=include_vgg_top)
    elif True:  # InceptionV3 branch is forced on; the NASNetLarge branch below is unreachable
        inp = (299, 299, 3)
        modelPackage = inception_v3
        modelID = 'InceptionV3'
        margins = (0, 1, 51, 50)
        Target_Frame_Shape = (300, 400, 3)
        cnn_model = inception_v3.InceptionV3(weights='imagenet',
                                             input_shape=inp,
                                             include_top=include_vgg_top)
    else:
        inp = (331, 331, 3)
        modelPackage = nasnet
        modelID = 'NASNetLarge'
        margins = (14, 15, 74, 75)
        Target_Frame_Shape = (360, 480, 3)
        cnn_model = nasnet.NASNetLarge(weights='imagenet',
                                       input_shape=inp,
                                       include_top=include_vgg_top)

    def preprocess_input(imagePath):
        return preprocess_input_for_model(imagePath, Target_Frame_Shape,
                                          margins, modelPackage)

    if include_vgg_top:
        modelID = modelID + '_inc_top'
        #cnn_model = addDropout(cnn_model)
        cnn_model.layers.pop()
        cnn_model.outputs = [cnn_model.layers[-1].output]
        cnn_model.output_layers = [cnn_model.layers[-1]]
        cnn_model.layers[-1].outbound_nodes = []
        for layer in cnn_model.layers:
            layer.trainable = False
        x = cnn_model.layers[-1].output  #
        """
        x = Dropout(0.25, name = 'dropout3_025')(x) #
        x = Dense(1024, activation='relu', name='fc1024')(x) #
        x = Dropout(0.25, name = 'dropout_025')(x) #
        x = Dense(num_outputs, name = 'fc3')(x) #
        """

        #a = Input(shape=(num_outputs, ), name='aux_input0')

        #x = (concatenate([x, a], axis = 1))#

        cnn_model = Model(inputs=cnn_model.input, outputs=x)
    return inp, cnn_model, modelID, preprocess_input
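
The layers.pop() approach above mutates the model in place and breaks on newer Keras releases. As a point of comparison, a minimal sketch (not part of the original code) that builds the same headless VGG16 by referencing the penultimate layer by name:

from keras.applications import vgg16
from keras.models import Model

# Stock VGG16 top: flatten -> fc1 -> fc2 -> predictions. Taking the 'fc2'
# output yields the same 4096-d features as popping the classifier layer.
base = vgg16.VGG16(weights='imagenet', include_top=True)
headless = Model(inputs=base.input, outputs=base.get_layer('fc2').output)
for layer in headless.layers:
    layer.trainable = False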
Example #2
    def __init__(self, model, input_size):

        input_shape = (input_size, input_size, 3)

        if model == 'xception':
            base_model = xception.Xception(weights='imagenet',
                                           include_top=False,
                                           pooling='max',
                                           input_shape=input_shape)
        elif model == 'vgg16':
            base_model = vgg16.VGG16(weights='imagenet',
                                     include_top=False,
                                     pooling='max',
                                     input_shape=input_shape)
        elif model == 'vgg19':
            base_model = vgg19.VGG19(weights='imagenet',
                                     include_top=False,
                                     pooling='max',
                                     input_shape=input_shape)
        elif model == 'inception_v3':
            base_model = inception_v3.InceptionV3(weights='imagenet',
                                                  include_top=False,
                                                  pooling='max',
                                                  input_shape=input_shape)
        elif model == 'mobilenet':
            base_model = mobilenet.MobileNet(weights='imagenet',
                                             include_top=False,
                                             pooling='max',
                                             input_shape=input_shape)
        elif model == 'inception_resnet_v2':
            base_model = inception_resnet_v2.InceptionResNetV2(
                weights='imagenet',
                include_top=False,
                pooling='max',
                input_shape=input_shape)
        elif model == 'resnet50':
            base_model = resnet50.ResNet50(weights='imagenet',
                                           include_top=False,
                                           pooling='max',
                                           input_shape=input_shape)
        elif model == 'nasnetlarge':
            base_model = nasnet.NASNetLarge(weights='imagenet',
                                            include_top=False,
                                            pooling='max',
                                            input_shape=input_shape)
        else:
            base_model = nasnet.NASNetMobile(weights='imagenet',
                                             include_top=False,
                                             pooling='max',
                                             input_shape=input_shape)

        self.input_size = input_size
        self.model = base_model
        self.graph = tf.get_default_graph()
        base_model.summary()
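
The long if/elif chain can be collapsed into a lookup table. A sketch under the same assumptions as the snippet (the keras.applications modules imported at module level, NASNetMobile as the fallback):

from keras.applications import (xception, vgg16, vgg19, inception_v3, mobilenet,
                                inception_resnet_v2, resnet50, nasnet)

# model name -> constructor; anything unknown falls back to NASNetMobile,
# mirroring the else branch above
_BACKBONES = {
    'xception': xception.Xception,
    'vgg16': vgg16.VGG16,
    'vgg19': vgg19.VGG19,
    'inception_v3': inception_v3.InceptionV3,
    'mobilenet': mobilenet.MobileNet,
    'inception_resnet_v2': inception_resnet_v2.InceptionResNetV2,
    'resnet50': resnet50.ResNet50,
    'nasnetlarge': nasnet.NASNetLarge,
}

def build_backbone(model, input_size):
    constructor = _BACKBONES.get(model, nasnet.NASNetMobile)
    return constructor(weights='imagenet', include_top=False, pooling='max',
                       input_shape=(input_size, input_size, 3))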
Example #3
    def load_imagenet_model(self):
        """
        Initialize the pre-trained model architecture and load the model weights.
        The downloaded weights contains only the convolution base. It does not
        contain the top two dense layers. We will have to manually define the top
        two dense layers. The size_dict dictionary object will hold the input sizes
        for various models, which will be further used to train the respective models
        with the given input image dimensions.
        
        Arguments:                    
    
            -model_name : Name of the model, for example - vgg16, inception_v3, resnet50 etc
    
        """

        if (self.input_params['model_name'] == "vgg16"):
            base_model = vgg16.VGG16(weights=None, include_top=False)
            base_model.load_weights(
                self.path_dict["weights_path"] +
                "vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5")
        elif (self.input_params['model_name'] == "inceptionv3"):
            base_model = inception_v3.InceptionV3(weights=None,
                                                  include_top=False)
            base_model.load_weights(
                self.path_dict["weights_path"] +
                "inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5")
        elif (self.input_params['model_name'] == "resnet50"):
            base_model = resnet.ResNet50(weights=None, include_top=False)
            base_model.load_weights(
                self.path_dict["weights_path"] +
                "resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5")
        elif (self.input_params['model_name'] == "inception_resnet"):
            base_model = inception_resnet_v2.InceptionResNetV2(
                weights=None, include_top=False)
            base_model.load_weights(
                self.path_dict["weights_path"] +
                "inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5"
            )
        elif (self.input_params['model_name'] == "nasnet"):
            base_model = nasnet.NASNetLarge(weights=None, include_top=False)
            base_model.load_weights(self.path_dict["weights_path"] +
                                    "NASNet-large-no-top.h5")
        elif (self.input_params['model_name'] == "xception"):
            base_model = xception.Xception(weights=None, include_top=False)
            # use the no-top weight file to match include_top=False
            base_model.load_weights(
                self.path_dict["weights_path"] +
                "xception_weights_tf_dim_ordering_tf_kernels_notop.h5")
        return base_model
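
The docstring notes that the dense head has to be added on top of the convolutional base by hand. A minimal sketch of that step (the layer sizes here are placeholders, not values from the original project):

from keras.applications import vgg16
from keras.layers import Dense, GlobalAveragePooling2D
from keras.models import Model

num_classes = 10   # placeholder class count
base_model = vgg16.VGG16(weights='imagenet', include_top=False)
x = GlobalAveragePooling2D()(base_model.output)
x = Dense(256, activation='relu')(x)                     # first manual dense layer
outputs = Dense(num_classes, activation='softmax')(x)    # classifier layer
model = Model(inputs=base_model.input, outputs=outputs)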
Example #4
def create_model(feature_extraction_method, num_classes, path_cnn_pre_trained,
                 input_size):

    if (feature_extraction_method == 'fine_tuning_lenet'):
        model = load_model(path_cnn_pre_trained)
        input_image = input_size
    elif (feature_extraction_method == 'fine_tuning_vgg16'):
        model = vgg16.VGG16(weights='imagenet', include_top=True)
        #layer_name = 'fc2'
        input_image = 224
    elif (feature_extraction_method == 'fine_tuning_vgg19'):
        model = vgg19.VGG19(weights='imagenet', include_top=True)
        #layer_name = 'fc2'
        input_image = 224
    elif (feature_extraction_method == 'fine_tuning_xception'):
        model = xception.Xception(weights='imagenet', include_top=True)
        #layer_name = 'avg_pool'
        input_image = 299
    elif (feature_extraction_method == 'fine_tuning_resnet'):
        model = resnet.ResNet50(weights='imagenet', include_top=True)
        #layer_name = 'avg_pool'
        input_image = 224
    elif (feature_extraction_method == 'fine_tuning_inception_resnet'):
        model = inception_resnet.InceptionResNetV2(weights='imagenet',
                                                   include_top=True)
        #layer_name = 'avg_pool'
        input_image = 299
    elif (feature_extraction_method == 'fine_tuning_nasnet'):
        model = nasnet.NASNetLarge(weights='imagenet', include_top=True)
        #layer_name = 'global_average_pooling2d_1'
        input_image = 331

    #Removing the last layer
    model.layers.pop()
    new_layer = Dense(num_classes, activation='softmax', name='predictions')
    model = Model(model.input, new_layer(model.layers[-1].output))

    model.summary()

    return model, input_image
Example #5
    def __init__(self, weights=None, cnn_model_type='nasnet', n_gpu=1):
        """Either load pretrained from imagenet, or load our saved
        weights from our own training."""

        self.weights = weights  # so we can check elsewhere which model

        if weights is None:
            # Get model with pretrained weights.
            if cnn_model_type == 'InceptionV3':
                self.model = inception_v3.InceptionV3(
                    weights='imagenet',
                    pooling='avg',
                    include_top=False
                )
            elif cnn_model_type == 'nasnet':
                base_model = nasnet.NASNetLarge(
                    weights='imagenet',
                    include_top=True
                )
                # issue https://github.com/keras-team/keras/issues/10109
                self.model = Model(
                    inputs=base_model.input,
                    outputs=base_model.get_layer('global_average_pooling2d_1').output
                )

        else:
            # Load the model first.
            self.model = load_model(weights)
            # Then remove the top so we get features not predictions.
            # From: https://github.com/fchollet/keras/issues/2371
            self.model.layers.pop()
            self.model.layers.pop()  # two pops to get to pool layer
            self.model.outputs = [self.model.layers[-1].output]
            self.model.output_layers = [self.model.layers[-1]]
            self.model.layers[-1].outbound_nodes = []

        if n_gpu > 1:
            self.model = multi_gpu_model(self.model, n_gpu)
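
The workaround above depends on the auto-generated layer name 'global_average_pooling2d_1', which changes whenever other models were built earlier in the same session. A sketch of the same idea (my suggestion, not the author's code) that locates the pooling layer by type instead:

from keras.applications import nasnet
from keras.layers import GlobalAveragePooling2D
from keras.models import Model

# NASNetLarge with its classifier: the layer right before the Dense head is a
# GlobalAveragePooling2D, whose output is the 4032-d feature vector.
base = nasnet.NASNetLarge(weights='imagenet', include_top=True)
pool = next(l for l in base.layers if isinstance(l, GlobalAveragePooling2D))
extractor = Model(inputs=base.input, outputs=pool.output)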
Example #6
def create_model(feature_extraction_method, path_cnn_pre_trained, input_size):

    if (feature_extraction_method == 'pretrained_lenet'):
        model = load_model(path_cnn_pre_trained)
        input_image = input_size
    elif (feature_extraction_method == 'pretrained_vgg16'):
        model = vgg16.VGG16(weights='imagenet', include_top=True)
        #layer_name = 'fc2'
        input_image = 224
    elif (feature_extraction_method == 'pretrained_vgg19'):
        model = vgg19.VGG19(weights='imagenet', include_top=True)
        #layer_name = 'fc2'
        input_image = 224
    elif (feature_extraction_method == 'pretrained_xception'):
        model = xception.Xception(weights='imagenet', include_top=True)
        #layer_name = 'avg_pool'
        input_image = 299
    elif (feature_extraction_method == 'pretrained_resnet'):
        model = resnet.ResNet50(weights='imagenet', include_top=True)
        #layer_name = 'avg_pool'
        input_image = 224
    elif (feature_extraction_method == 'pretrained_inception_resnet'):
        model = inception_resnet.InceptionResNetV2(weights='imagenet',
                                                   include_top=True)
        #layer_name = 'avg_pool'
        input_image = 299
    elif (feature_extraction_method == 'pretrained_nasnet'):
        model = nasnet.NASNetLarge(weights='imagenet', include_top=True)
        #layer_name = 'global_average_pooling2d_1'
        input_image = 331

    intermediate_layer_model = Model(inputs=model.input,
                                     outputs=model.layers[-2].output)

    model.summary()

    return intermediate_layer_model, input_image
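
A quick way to sanity-check the returned extractor without the surrounding pipeline; the random input below is a stand-in, not part of the original code:

import numpy as np
from keras.applications import vgg16
from keras.models import Model

full = vgg16.VGG16(weights='imagenet', include_top=True)
extractor = Model(inputs=full.input, outputs=full.layers[-2].output)  # 'fc2'
dummy = vgg16.preprocess_input(np.random.rand(1, 224, 224, 3) * 255.0)
print(extractor.predict(dummy).shape)   # (1, 4096)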
Example #7
def main():
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("train_class", type=str, choices=classes.keys())
    parser.add_argument("data_dir", type=str)
    parser.add_argument('-e', "--epoch",
                        required=False,
                        type=int,
                        default=64,
                        dest="epoch")
    parser.add_argument('-ef', "--epoch-fine-tune",
                        required=False,
                        type=int,
                        default=200,
                        dest="epoch_fine_tune")
    parser.add_argument('-b', '--batch-size',
                        required=False,
                        default=1024,
                        type=int,
                        dest="batch")
    parser.add_argument('-lr', '--learning-rate',
                        required=False,
                        default=1e-4,
                        type=float,
                        dest="lr")
    parser.add_argument('-decay', '--learning-rate-decay',
                        required=False,
                        default=1e-6,
                        type=float,
                        dest="decay")
    # boolean flags: with type=bool, any non-empty string (e.g. "False") parses as True
    parser.add_argument('-ignore-npz', '--ignore-precomputed-learning-file',
                        required=False,
                        action="store_true",
                        dest="ignore_npz")
    parser.add_argument('-ri', '--use-random-weight-initialisation',
                        required=False,
                        action="store_true",
                        dest="random_init")
    parser.add_argument('-ua', '--unfroze-all-convolution-layer-directly',
                        required=False,
                        action="store_true",
                        dest="unfroze_all")
    parser.add_argument('-m', '--model-name',
                        required=False,
                        default="MobileNetV2",
                        type=str,
                        dest="model_name")
    parser.add_argument('-d', '--dense-layer-size',
                        required=False,
                        nargs="*",
                        default=[],
                        type=int,
                        dest="dense_size")
    parser.add_argument('-is', '--input-size',
                        required=False,
                        default=96,
                        type=int,
                        dest="input_size")
    parser.add_argument('-viz', '--data-visualisation',
                        required=False,
                        action="store_true",
                        dest="data_visualisation")
    args = parser.parse_args()
    batch_size = args.batch

    class_name = args.train_class
    out_classes = classes[class_name]["signs_classes"]
    rotation_and_flips = classes[class_name]["rotation_and_flips"]
    h_symmetry_classes = classes[class_name]["h_symmetry"]
    try:
        merge_sign_classes = classes[class_name]["merge_sign_classes"]
    except KeyError:
        merge_sign_classes = None

    mapping = {c: i for i, c in enumerate(out_classes)}
    mapping_id_to_name = {i: c for c, i in mapping.items()}

    os.makedirs(class_name, exist_ok=True)

    x_train, y_train, x_test, y_test = get_data_for_master_class(class_name=class_name,
                                                                 mapping=mapping,
                                                                 mapping_id_to_name=mapping_id_to_name,
                                                                 rotation_and_flips=rotation_and_flips,
                                                                 data_dir=args.data_dir,
                                                                 merge_sign_classes=merge_sign_classes,
                                                                 h_symmetry_classes=h_symmetry_classes,
                                                                 image_size=(args.input_size, args.input_size),
                                                                 ignore_npz=args.ignore_npz,
                                                                 out_classes=out_classes)
    if args.data_visualisation:
        preprocess_input = lambda x: x
        model = None
    else:
        if args.random_init:
            weights = None
        else:
            weights = 'imagenet'
        if args.model_name == "MobileNetV2":
            preprocess_input = mobilenetv2.preprocess_input
            base_model = mobilenetv2.MobileNetV2(weights=weights,
                                                 include_top=False,
                                                 input_shape=(args.input_size, args.input_size, 3),
                                                 pooling='avg')
        elif args.model_name == "InceptionResNetV2":
            preprocess_input = inception_resnet_v2.preprocess_input
            base_model = inception_resnet_v2.InceptionResNetV2(weights=weights,
                                                               include_top=False,
                                                               input_shape=(args.input_size, args.input_size, 3),
                                                               pooling='avg')
        elif args.model_name == "NASNetLarge":
            preprocess_input = nasnet.preprocess_input
            base_model = nasnet.NASNetLarge(weights=weights,
                                            include_top=False,
                                            input_shape=(args.input_size, args.input_size, 3),
                                            pooling='avg')
        else:
            raise ValueError("unknown model name {}, should be one of {}".format(args.model_name,
                                                                                 ["MobileNetV2", "InceptionResNetV2",
                                                                                  "NASNetLarge"]))
        predictions = base_model.outputs[0]
        for s in args.dense_size:
            predictions = Dense(s, activation='relu')(predictions)
        predictions = Dense(len(out_classes), activation='softmax')(predictions)
        model = Model(inputs=base_model.input, outputs=predictions)

    # model.summary()
    # blocks = {}
    # for i, layer in enumerate(base_model.layers):
    #     s = layer.name.split('_')
    #     if s[0] == "block":
    #         b = int(s[1])
    #         if b not in blocks:
    #             blocks[b] = [i]
    #         else:
    #             blocks[b].append(i)
    # exit(0)

    callbacks = [ModelCheckpoint(filepath="{}/checkpoint.h5".format(class_name),
                                 monitor="val_loss",
                                 mode='min',
                                 verbose=0,
                                 save_best_only="True",
                                 save_weights_only=False,
                                 period=1),
                 EarlyStopping(monitor='val_acc',
                               mode='max',
                               min_delta=0.001,
                               patience=40,
                               verbose=1,
                               restore_best_weights=True)
                 ]

    x_test = np.stack([preprocess_input(i) for i in x_test])
    datagen = ImageDataGenerator(featurewise_center=False,
                                 featurewise_std_normalization=False,
                                 rotation_range=10,
                                 width_shift_range=0.1,
                                 height_shift_range=0.1,
                                 brightness_range=(0.5, 1.4),
                                 shear_range=3.0,
                                 zoom_range=(0.7, 1.1),
                                 fill_mode='nearest',
                                 horizontal_flip=False,
                                 vertical_flip=False,
                                 preprocessing_function=preprocess_input)
    datagen.fit(x_train)

    if args.data_visualisation:
        for b in datagen.flow(x_train, y_train, batch_size=1):
            im, im_class = b[0][0], b[1][0]
            im_class = int(np.argmax(im_class))
            plt.imshow(im.astype(int))
            plt.title(out_classes[im_class])
            plt.show()
        return

    if not args.random_init:
        # if the network is not randomly initialized, we first fine-tune only the newly added top layers
        for layer in base_model.layers:
            layer.trainable = False
        model.compile(optimizer=rmsprop(lr=args.lr, decay=args.decay),
                      loss='categorical_crossentropy', metrics=["accuracy"])
        history = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                                      steps_per_epoch=ceil(len(x_train) / batch_size),
                                      epochs=args.epoch,
                                      verbose=1,
                                      validation_data=(x_test, y_test),
                                      use_multiprocessing=True,
                                      callbacks=callbacks)
        plot_history(history, "{0}/{1}_{0}_dense_".format(class_name, args.model_name))
        model.save("{0}/{1}_{0}_dense.h5".format(class_name, args.model_name), overwrite=True)

        if not args.unfroze_all:
            # unfreeze the last 3 blocks of MobileNet
            for layer in model.layers[:113]:
                layer.trainable = False
            for layer in model.layers[113:]:
                layer.trainable = True
            model.compile(optimizer=SGD(lr=args.lr, momentum=0.9, decay=args.decay),
                          loss='categorical_crossentropy', metrics=["accuracy"])
            history = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                                          steps_per_epoch=ceil(len(x_train) / batch_size),
                                          epochs=args.epoch_fine_tune,
                                          verbose=1,
                                          validation_data=(x_test, y_test),
                                          use_multiprocessing=True,
                                          callbacks=callbacks)
            plot_history(history, "{0}/{1}_{0}_fine_tuning_1_".format(class_name, args.model_name))

            model.save("{0}/{1}_{0}_1.h5".format(class_name, args.model_name), overwrite=True)

            # unfreeze the last 6 blocks of MobileNet
            for layer in model.layers[:87]:
                layer.trainable = False
            for layer in model.layers[87:]:
                layer.trainable = True
            model.compile(optimizer=SGD(lr=args.lr, momentum=0.9, decay=args.decay),
                          loss='categorical_crossentropy', metrics=["accuracy"])
            history = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                                          steps_per_epoch=ceil(len(x_train) / batch_size),
                                          epochs=args.epoch_fine_tune,
                                          verbose=1,
                                          validation_data=(x_test, y_test),
                                          use_multiprocessing=True,
                                          callbacks=callbacks)
            plot_history(history, "{0}/{1}_{0}_fine_tuning_2_".format(class_name, args.model_name))

            model.save("{0}/{1}_{0}_2.h5".format(class_name, args.model_name), overwrite=True)

    # unfreeze the whole model
    for layer in model.layers:
        layer.trainable = True
    model.compile(optimizer=SGD(lr=args.lr, momentum=0.9, decay=args.decay),
                  loss='categorical_crossentropy', metrics=["accuracy"])
    history = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                                  steps_per_epoch=ceil(len(x_train) / batch_size),
                                  epochs=args.epoch_fine_tune,
                                  verbose=1,
                                  validation_data=(x_test, y_test),
                                  use_multiprocessing=True,
                                  callbacks=callbacks)
    plot_history(history, "{0}/{1}_{0}_fine_tuning_f_".format(class_name, args.model_name))

    model.save("{0}/{1}_{0}_final.h5".format(class_name, args.model_name), overwrite=True)
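
Stripped of the argument parsing and data handling, the training schedule above follows the usual two-phase transfer-learning recipe. A compact sketch of that recipe (class count and input size are placeholders, not the script's values):

from keras.applications import nasnet
from keras.layers import Dense
from keras.models import Model
from keras.optimizers import SGD

# placeholders: 10 classes, 224x224 inputs
base = nasnet.NASNetMobile(weights='imagenet', include_top=False,
                           input_shape=(224, 224, 3), pooling='avg')
head = Dense(10, activation='softmax')(base.output)
model = Model(inputs=base.input, outputs=head)

# phase 1: train only the new head
for layer in base.layers:
    layer.trainable = False
model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
              metrics=['accuracy'])
# model.fit(...) goes here

# phase 2: unfreeze everything and continue with a small learning rate
for layer in model.layers:
    layer.trainable = True
model.compile(optimizer=SGD(lr=1e-4, momentum=0.9),
              loss='categorical_crossentropy', metrics=['accuracy'])
# model.fit(...) again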
Example #8
if K.image_data_format() == 'channels_first':
    combination_image = K.placeholder((1, 3, img_nrows, img_ncols))
else:
    combination_image = K.placeholder((1, img_nrows, img_ncols, 3))

# combine the 3 images into a single Keras tensor
input_tensor = K.concatenate(
    [base_image, style_reference_image, combination_image], axis=0)
print(input_tensor)

# build the NASNetLarge network with our 3 images as input
# the model will be loaded with pre-trained ImageNet weights
# model = vgg19.VGG19(input_tensor=input_tensor,
#                     weights='imagenet', include_top=False)
model = nasnet.NASNetLarge(input_tensor=input_tensor,
                           input_shape=(400, 300, 3),
                           weights='imagenet',
                           include_top=False)
# model = vgg19.VGG19(input_tensor=input_tensor, input_shape=(400, 300, 3),
#                      weights='imagenet', include_top=False)
print('Model loaded.')

# get the symbolic outputs of each "key" layer (we gave them unique names).
outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
for i in outputs_dict:
    print(i)

# compute the neural style loss
# first we need to define 4 util functions

# the gram matrix of an image tensor (feature-wise outer product)
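
The snippet is cut off before those utility functions are defined; a sketch of the first one, following the standard Keras neural-style-transfer example, would be:

from keras import backend as K

def gram_matrix(x):
    # feature-wise outer product of a 3-D feature-map tensor
    if K.image_data_format() == 'channels_first':
        features = K.batch_flatten(x)
    else:
        features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    return K.dot(features, K.transpose(features))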
Example #9
def nasnet_download():
    print("开始下载NASNetLarge")
    nasnet.NASNetLarge()
    print("完成下载NASNetLarge")
Example #10
def naslarge(input_image, **kwargs):
    from keras.applications import nasnet
    model = nasnet.NASNetLarge(input_tensor=input_image,
                               include_top=False,
                               **kwargs)
    return model
Example #11
    #print('Total params: {:,}'.format(trainable_count + non_trainable_count))
    #print('Trainable params: {:,}'.format(trainable_count))
    #print('Non-trainable params: {:,}'.format(non_trainable_count))

paramcount_list = []
for model_name in ["vgg16", "nasnet", "inception_resnet", "inceptionv3", "xception"]:
    if model_name == "vgg16":
        base_model = vgg16.VGG16(weights=None, include_top=False)
    elif model_name == "inceptionv3":
        base_model = inception_v3.InceptionV3(weights=None, include_top=False)
    elif model_name == "resnet50":
        base_model = resnet.ResNet50(weights=None, include_top=False)
    elif model_name == "inception_resnet":
        base_model = inception_resnet_v2.InceptionResNetV2(weights=None, include_top=False)
    elif model_name == "nasnet":
        base_model = nasnet.NASNetLarge(weights=None, include_top=False)
    elif model_name == "xception":
        base_model = xception.Xception(weights=None, include_top=False)
    print("model name is:", model_name)
    paramcount_list.append((model_name, "base", count_params(base_model)))
    #Adding a global spatial average pooling layer
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    customlayers = customlayer()
    output_layer = customlayers(x)
    model_stg1 = Model(inputs=base_model.input, outputs=output_layer)
    model_json = model_stg1.to_json()
    modelpath = liby + model_name + ".json"
    with open(modelpath, "w") as json_file:
        json_file.write(model_json)
        modelpath = liby + model_name + ".pdf"
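
count_params and customlayer are helpers defined elsewhere in that project and are not shown here. For the parameter count, a plausible stand-in (hypothetical, built on the standard Keras backend utility) would be:

import numpy as np
from keras import backend as K

def count_params(model):
    # trainable + non-trainable scalar parameters, matching the commented-out
    # summary prints at the top of this example
    trainable = int(np.sum([K.count_params(w) for w in model.trainable_weights]))
    non_trainable = int(np.sum([K.count_params(w) for w in model.non_trainable_weights]))
    return trainable + non_trainable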
Example #12
def save_bottlebeck_features(model_name):

    if model_name=='resnet50':
        model = resnet50.ResNet50(weights='imagenet', include_top=False, pooling='avg')

        # 2048 dimensional features
        # pooling: 1) None: output is 16x16x2048, 2) avg: 1x1x2048, 3) max: 1x1x2048
        #base_model=resnet50.ResNet50(input_shape=(img_height,img_width,3),weights='imagenet', include_top=False)
        #model = Model(inputs=base_model.input, outputs=base_model.get_layer('activation_25').output)
    elif model_name=='nasnet_large':
        model=nasnet.NASNetLarge(input_shape=(img_height,img_width,3),weights='imagenet', include_top=False, pooling='avg')
        #4032 dimensional features
    elif model_name=='xception':
        model=xception.Xception(input_shape=(img_height,img_width,3),weights='imagenet', include_top=False, pooling='avg')

        #2048 dimensional features
    elif model_name=='inceptionv3':
        model=inception_v3.InceptionV3(input_shape=(img_height,img_width,3),weights='imagenet', include_top=False, pooling='avg')

        #2048 dimensional features
    elif model_name=='inceptionresnetv2':
        model=inception_resnet_v2.InceptionResNetV2(input_shape=(img_height,img_width,3),weights='imagenet', include_top=False, pooling='avg')
        #1536 dimensional features
    elif model_name=='densenet':
        model=densenet.DenseNet201(input_shape=(img_height,img_width,3),weights='imagenet', include_top=False, pooling='avg')
        # 1920 dimensional features
    else:
        model=vgg19.VGG19(weights='imagenet', include_top=False, pooling='avg')
        # 512 dimensional features
        #base_model=vgg19.VGG19(input_shape=(img_height,img_width,3),weights='imagenet', include_top=False)
        #model=Model(inputs=base_model.input,outputs=base_model.get_layer('block4_pool').output)

    images = os.listdir(wsi_dir)
    for image_name in images:
        if '.svs' in image_name:
            patient_id=image_name[0:23]
            image_features = []
            image_names = []
            #patient_id='TCGA-2F-A9KT'
            #patches=os.listdir(train_data_dir[ind]+patient_id+'*.png')
            patches=glob.glob(data_dir+patient_id+'*.png')

            for patch_name in patches:
                patch_split=patch_name.split("\\")
                img = image.load_img(patch_name, target_size=(img_height,img_width))
                # convert image to numpy array
                x = image.img_to_array(img)

                # the image is now an array of shape (img_height, img_width, 3)
                # need to expand it to (1, img_height, img_width, 3) since the model expects a batch
                x = np.expand_dims(x, axis=0)
                #imshow(np.uint8(x[0,:,:,:]))

                if model_name=='resnet50':
                    x = resnet50.preprocess_input(x)
                elif model_name=='nasnet_large':
                    x = nasnet.preprocess_input(x)
                elif model_name == 'xception':
                    x = xception.preprocess_input(x)
                elif model_name=='inceptionv3':
                    x=inception_v3.preprocess_input(x)
                elif model_name == 'inceptionresnetv2':
                    x=inception_resnet_v2.preprocess_input(x)
                elif model_name=='densenet':
                    x=densenet.preprocess_input(x)
                else:
                    x=vgg19.preprocess_input(x)

                # extract the features
                features = model.predict(x)[0]
                #features=np.mean(features,axis=(0,1))

                image_features.append(features)
                image_names.append(patch_split[1])

            if save_features:
                scipy.io.savemat('./step2_output/'+patient_id+'_feat.mat', mdict={'image_features': image_features, 'image_names':image_names})
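
The model choice and the matching preprocess_input are selected in two separate if/elif chains above, so they can drift out of sync. One way to keep them paired (a sketch, not the original project's code):

from keras.applications import (resnet50, nasnet, xception, inception_v3,
                                inception_resnet_v2, densenet, vgg19)

# backbone constructor paired with its own preprocess_input
BACKBONES = {
    'resnet50': (resnet50.ResNet50, resnet50.preprocess_input),
    'nasnet_large': (nasnet.NASNetLarge, nasnet.preprocess_input),
    'xception': (xception.Xception, xception.preprocess_input),
    'inceptionv3': (inception_v3.InceptionV3, inception_v3.preprocess_input),
    'inceptionresnetv2': (inception_resnet_v2.InceptionResNetV2,
                          inception_resnet_v2.preprocess_input),
    'densenet': (densenet.DenseNet201, densenet.preprocess_input),
    'vgg19': (vgg19.VGG19, vgg19.preprocess_input),
}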
Example #13
def build_cnn():

    nasnet_inst = nasnet.NASNetLarge(include_top=True,
                                     weights='imagenet',
                                     input_tensor=None,
                                     input_shape=(HEIGHT, WIDTH, 3),
                                     pooling=None,
                                     classes=1000)

    x = nasnet_inst.output
    x = Dense(64, activation="relu")(x)
    x = Dense(2, activation="softmax")(x)

    model = Model(inputs=nasnet_inst.inputs, outputs=x)

    for i in model.layers:
        i.trainable = True

    trainable_layers = [
        "input_1", "stem_conv1", "stem_bn1", "activation_1",
        "reduction_conv_1_stem_1", "reduction_bn_1_stem_1", "activation_2",
        "activation_4", "separable_conv_1_reduction_left",
        "separable_conv_1_reduction_1_st", "separable_conv_1_bn_reduction_l",
        "separable_conv_1_bn_reduction_1", "activation_3", "activation_5",
        "separable_conv_2_reduction_left", "separable_conv_2_reduction_1_st",
        "separable_conv_2_bn_reduction_l", "separable_conv_2_bn_reduction_1",
        "activation_6", "reduction_add_1_stem_1",
        "separable_conv_1_reduction_righ", "activation_8", "activation_10",
        "separable_conv_1_bn_reduction_r", "separable_conv_1_reduction_righ",
        "separable_conv_1_reduction_left", "activation_7",
        "separable_conv_1_bn_reduction_r", "separable_conv_1_bn_reduction_l",
        "separable_conv_2_reduction_righ", "activation_9", "activation_11",
        "reduction_left2_stem_1", "separable_conv_2_bn_reduction_r",
        "separable_conv_2_reduction_righ", "separable_conv_2_reduction_left",
        "adjust_relu_1_stem_2", "reduction_add_2_stem_1",
        "reduction_left3_stem_1", "separable_conv_2_bn_reduction_r",
        "reduction_left4_stem_1", "separable_conv_2_bn_reduction_l",
        "reduction_right5_stem_1", "zero_padding2d_1", "reduction_add3_stem_1",
        "add_1", "reduction_add4_stem_1", "cropping2d_1",
        "reduction_concat_stem_1", "adjust_avg_pool_1_stem_2",
        "adjust_avg_pool_2_stem_2", "activation_12", "adjust_conv_1_stem_2",
        "adjust_conv_2_stem_2", "reduction_conv_1_stem_2", "concatenate_1",
        "reduction_bn_1_stem_2", "adjust_bn_stem_2", "activation_13",
        "activation_15", "separable_conv_1_reduction_left",
        "separable_conv_1_reduction_1_st", "separable_conv_1_bn_reduction_l",
        "separable_conv_1_bn_reduction_1", "activation_14", "activation_16",
        "separable_conv_2_reduction_left", "separable_conv_2_reduction_1_st",
        "separable_conv_2_bn_reduction_l", "separable_conv_2_bn_reduction_1",
        "activation_17", "reduction_add_1_stem_2",
        "separable_conv_1_reduction_righ", "activation_19", "activation_21",
        "separable_conv_1_bn_reduction_r", "separable_conv_1_reduction_righ",
        "separable_conv_1_reduction_left", "activation_18",
        "separable_conv_1_bn_reduction_r", "separable_conv_1_bn_reduction_l",
        "separable_conv_2_reduction_righ", "activation_20", "activation_22",
        "reduction_left2_stem_2", "separable_conv_2_bn_reduction_r",
        "separable_conv_2_reduction_righ", "separable_conv_2_reduction_left",
        "adjust_relu_1_0", "reduction_add_2_stem_2", "reduction_left3_stem_2",
        "separable_conv_2_bn_reduction_r", "reduction_left4_stem_2",
        "separable_conv_2_bn_reduction_l", "reduction_right5_stem_2",
        "zero_padding2d_2", "reduction_add3_stem_2", "add_2",
        "reduction_add4_stem_2", "cropping2d_2", "reduction_concat_stem_2",
        "adjust_avg_pool_1_0", "adjust_avg_pool_2_0", "adjust_conv_1_0",
        "adjust_conv_2_0", "activation_23", "concatenate_2", "normal_conv_1_0",
        "adjust_bn_0", "normal_bn_1_0", "activation_24", "activation_26",
        "activation_28", "activation_30", "activation_32",
        "separable_conv_1_normal_left1_0", "separable_conv_1_normal_right1_",
        "separable_conv_1_normal_left2_0", "separable_conv_1_normal_right2_",
        "separable_conv_1_normal_left5_0", "separable_conv_1_bn_normal_left",
        "separable_conv_1_bn_normal_righ", "separable_conv_1_bn_normal_left",
        "separable_conv_1_bn_normal_righ", "separable_conv_1_bn_normal_left",
        "activation_25", "activation_27", "activation_29", "activation_31",
        "activation_33", "separable_conv_2_normal_left1_0",
        "separable_conv_2_normal_right1_", "separable_conv_2_normal_left2_0",
        "separable_conv_2_normal_right2_", "separable_conv_2_normal_left5_0",
        "separable_conv_2_bn_normal_left", "separable_conv_2_bn_normal_righ",
        "separable_conv_2_bn_normal_left", "separable_conv_2_bn_normal_righ",
        "normal_left3_0", "normal_left4_0", "normal_right4_0",
        "separable_conv_2_bn_normal_left", "normal_add_1_0", "normal_add_2_0",
        "normal_add_3_0", "normal_add_4_0", "normal_add_5_0",
        "normal_concat_0", "activation_34", "activation_35",
        "adjust_conv_projection_1", "normal_conv_1_1", "adjust_bn_1",
        "normal_bn_1_1", "activation_36", "activation_38", "activation_40",
        "activation_42", "activation_44", "separable_conv_1_normal_left1_1",
        "separable_conv_1_normal_right1_", "separable_conv_1_normal_left2_1",
        "separable_conv_1_normal_right2_", "separable_conv_1_normal_left5_1",
        "separable_conv_1_bn_normal_left", "separable_conv_1_bn_normal_righ",
        "separable_conv_1_bn_normal_left", "separable_conv_1_bn_normal_righ",
        "separable_conv_1_bn_normal_left", "activation_37", "activation_39",
        "activation_41", "activation_43", "activation_45",
        "separable_conv_2_normal_left1_1", "separable_conv_2_normal_right1_",
        "separable_conv_2_normal_left2_1", "separable_conv_2_normal_right2_",
        "separable_conv_2_normal_left5_1", "separable_conv_2_bn_normal_left",
        "separable_conv_2_bn_normal_righ", "separable_conv_2_bn_normal_left",
        "separable_conv_2_bn_normal_righ", "normal_left3_1", "normal_left4_1",
        "normal_right4_1", "separable_conv_2_bn_normal_left"
    ]
    #                            "normal_add_1_1","normal_add_2_1","normal_add_3_1","normal_add_4_1","normal_add_5_1",
    #                            "normal_concat_1","activation_46","activation_47","adjust_conv_projection_2","normal_conv_1_2","adjust_bn_2","normal_bn_1_2","activation_48","activation_50","activation_52",
    #                            "activation_54","activation_56","separable_conv_1_normal_left1_2","separable_conv_1_normal_right1_","separable_conv_1_normal_left2_2","separable_conv_1_normal_right2_",
    #                            "separable_conv_1_normal_left5_2","separable_conv_1_bn_normal_left","separable_conv_1_bn_normal_righ","separable_conv_1_bn_normal_left","separable_conv_1_bn_normal_righ",
    #                            "separable_conv_1_bn_normal_left","activation_49","activation_51","activation_53","activation_55","activation_57","separable_conv_2_normal_left1_2","separable_conv_2_normal_right1_",
    #                            "separable_conv_2_normal_left2_2","separable_conv_2_normal_right2_","separable_conv_2_normal_left5_2","separable_conv_2_bn_normal_left","separable_conv_2_bn_normal_righ",
    #                            "separable_conv_2_bn_normal_left","separable_conv_2_bn_normal_righ","normal_left3_2","normal_left4_2","normal_right4_2","separable_conv_2_bn_normal_left","normal_add_1_2",
    #                            "normal_add_2_2","normal_add_3_2","normal_add_4_2","normal_add_5_2","normal_concat_2","activation_58","activation_59","adjust_conv_projection_3","normal_conv_1_3","adjust_bn_3",
    #                            "normal_bn_1_3","activation_60","activation_62","activation_64","activation_66","activation_68","separable_conv_1_normal_left1_3","separable_conv_1_normal_right1_",
    #                            "separable_conv_1_normal_left2_3","separable_conv_1_normal_right2_","separable_conv_1_normal_left5_3","separable_conv_1_bn_normal_left","separable_conv_1_bn_normal_righ",
    #                            "separable_conv_1_bn_normal_left","separable_conv_1_bn_normal_righ","separable_conv_1_bn_normal_left","activation_61","activation_63","activation_65","activation_67","activation_69",
    #                            "separable_conv_2_normal_left1_3","separable_conv_2_normal_right1_","separable_conv_2_normal_left2_3","separable_conv_2_normal_right2_","separable_conv_2_normal_left5_3",
    #                            "separable_conv_2_bn_normal_left","separable_conv_2_bn_normal_righ","separable_conv_2_bn_normal_left","separable_conv_2_bn_normal_righ","normal_left3_3","normal_left4_3",
    #                            "normal_right4_3","separable_conv_2_bn_normal_left","normal_add_1_3","normal_add_2_3","normal_add_3_3","normal_add_4_3","normal_add_5_3","normal_concat_3","activation_70",
    #                            "activation_71","adjust_conv_projection_4","normal_conv_1_4","adjust_bn_4","normal_bn_1_4","activation_72","activation_74","activation_76","activation_78","activation_80",
    #                            "separable_conv_1_normal_left1_4","separable_conv_1_normal_right1_","separable_conv_1_normal_left2_4","separable_conv_1_normal_right2_","separable_conv_1_normal_left5_4",
    #                            "separable_conv_1_bn_normal_left","separable_conv_1_bn_normal_righ","separable_conv_1_bn_normal_left","separable_conv_1_bn_normal_righ","separable_conv_1_bn_normal_left",
    #                            "activation_73","activation_75","activation_77","activation_79","activation_81","separable_conv_2_normal_left1_4","separable_conv_2_normal_right1_","separable_conv_2_normal_left2_4",
    #                            "separable_conv_2_normal_right2_","separable_conv_2_normal_left5_4","separable_conv_2_bn_normal_left","separable_conv_2_bn_normal_righ","separable_conv_2_bn_normal_left",
    #                            "separable_conv_2_bn_normal_righ","normal_left3_4","normal_left4_4","normal_right4_4","separable_conv_2_bn_normal_left","normal_add_1_4","normal_add_2_4","normal_add_3_4",
    #                            "normal_add_4_4","normal_add_5_4","normal_concat_4","activation_82","activation_83","adjust_conv_projection_5","normal_conv_1_5","adjust_bn_5","normal_bn_1_5","activation_84",
    #                            "activation_86","activation_88","activation_90","activation_92","separable_conv_1_normal_left1_5","separable_conv_1_normal_right1_","separable_conv_1_normal_left2_5",
    #                            "separable_conv_1_normal_right2_","separable_conv_1_normal_left5_5","separable_conv_1_bn_normal_left","separable_conv_1_bn_normal_righ","separable_conv_1_bn_normal_left",
    #                            "separable_conv_1_bn_normal_righ","separable_conv_1_bn_normal_left","activation_85","activation_87","activation_89","activation_91","activation_93","separable_conv_2_normal_left1_5",
    #                            "separable_conv_2_normal_right1_","separable_conv_2_normal_left2_5","separable_conv_2_normal_right2_","separable_conv_2_normal_left5_5","separable_conv_2_bn_normal_left",
    #                            "separable_conv_2_bn_normal_righ","separable_conv_2_bn_normal_left","separable_conv_2_bn_normal_righ","normal_left3_5","normal_left4_5","normal_right4_5",
    #                            "separable_conv_2_bn_normal_left","normal_add_1_5","normal_add_2_5","normal_add_3_5","normal_add_4_5","normal_add_5_5","normal_concat_5","activation_95","activation_94",
    #                            "reduction_conv_1_reduce_6","adjust_conv_projection_reduce_6","reduction_bn_1_reduce_6","adjust_bn_reduce_6","activation_96","activation_98","separable_conv_1_reduction_left",
    #                            "separable_conv_1_reduction_1_re","separable_conv_1_bn_reduction_l","separable_conv_1_bn_reduction_1","activation_97","activation_99","separable_conv_2_reduction_left",
    #                            "separable_conv_2_reduction_1_re","separable_conv_2_bn_reduction_l","separable_conv_2_bn_reduction_1","activation_100","reduction_add_1_reduce_6","separable_conv_1_reduction_righ",
    #                            "activation_102","activation_104","separable_conv_1_bn_reduction_r","separable_conv_1_reduction_righ","separable_conv_1_reduction_left","activation_101","separable_conv_1_bn_reduction_r",
    #                            "separable_conv_1_bn_reduction_l","separable_conv_2_reduction_righ","activation_103","activation_105","reduction_left2_reduce_6","separable_conv_2_bn_reduction_r",
    #                            "separable_conv_2_reduction_righ","separable_conv_2_reduction_left","adjust_relu_1_7","reduction_add_2_reduce_6","reduction_left3_reduce_6","separable_conv_2_bn_reduction_r",
    #                            "reduction_left4_reduce_6","separable_conv_2_bn_reduction_l","reduction_right5_reduce_6","zero_padding2d_3","reduction_add3_reduce_6","add_3","reduction_add4_reduce_6",
    #                            "cropping2d_3","reduction_concat_reduce_6","adjust_avg_pool_1_7","adjust_avg_pool_2_7","adjust_conv_1_7","adjust_conv_2_7","activation_106","concatenate_3","normal_conv_1_7",
    #                            "adjust_bn_7","normal_bn_1_7","activation_107","activation_109","activation_111","activation_113","activation_115","separable_conv_1_normal_left1_7","separable_conv_1_normal_right1_",
    #                            "separable_conv_1_normal_left2_7","separable_conv_1_normal_right2_","separable_conv_1_normal_left5_7","separable_conv_1_bn_normal_left","separable_conv_1_bn_normal_righ",
    #                            "separable_conv_1_bn_normal_left","separable_conv_1_bn_normal_righ","separable_conv_1_bn_normal_left","activation_108","activation_110","activation_112","activation_114",
    #                            "activation_116","separable_conv_2_normal_left1_7","separable_conv_2_normal_right1_","separable_conv_2_normal_left2_7","separable_conv_2_normal_right2_","separable_conv_2_normal_left5_7",
    #                            "separable_conv_2_bn_normal_left","separable_conv_2_bn_normal_righ","separable_conv_2_bn_normal_left","separable_conv_2_bn_normal_righ","normal_left3_7","normal_left4_7","normal_right4_7",
    #                            "separable_conv_2_bn_normal_left","normal_add_1_7","normal_add_2_7","normal_add_3_7","normal_add_4_7","normal_add_5_7","normal_concat_7","activation_117","activation_118","adjust_conv_projection_8",
    #                            "normal_conv_1_8","adjust_bn_8","normal_bn_1_8","activation_119","activation_121","activation_123","activation_125","activation_127","separable_conv_1_normal_left1_8",
    #                            "separable_conv_1_normal_right1_","separable_conv_1_normal_left2_8","separable_conv_1_normal_right2_","separable_conv_1_normal_left5_8","separable_conv_1_bn_normal_left",
    #                            "separable_conv_1_bn_normal_righ","separable_conv_1_bn_normal_left","separable_conv_1_bn_normal_righ","separable_conv_1_bn_normal_left","activation_120","activation_122",
    #                            "activation_124","activation_126","activation_128","separable_conv_2_normal_left1_8","separable_conv_2_normal_right1_","separable_conv_2_normal_left2_8","separable_conv_2_normal_right2_",
    #                            "separable_conv_2_normal_left5_8","separable_conv_2_bn_normal_left","separable_conv_2_bn_normal_righ","separable_conv_2_bn_normal_left","separable_conv_2_bn_normal_righ","normal_left3_8",
    #                            "normal_left4_8","normal_right4_8","separable_conv_2_bn_normal_left","normal_add_1_8","normal_add_2_8","normal_add_3_8","normal_add_4_8","normal_add_5_8","normal_concat_8","activation_129",
    #                            "activation_130","adjust_conv_projection_9","normal_conv_1_9","adjust_bn_9","normal_bn_1_9","activation_131","activation_133","activation_135","activation_137","activation_139",
    #                            "separable_conv_1_normal_left1_9","separable_conv_1_normal_right1_","separable_conv_1_normal_left2_9","separable_conv_1_normal_right2_","separable_conv_1_normal_left5_9",
    #                            "separable_conv_1_bn_normal_left","separable_conv_1_bn_normal_righ","separable_conv_1_bn_normal_left","separable_conv_1_bn_normal_righ","separable_conv_1_bn_normal_left",
    #                            "activation_132","activation_134","activation_136","activation_138","activation_140","separable_conv_2_normal_left1_9","separable_conv_2_normal_right1_","separable_conv_2_normal_left2_9",
    #                            "separable_conv_2_normal_right2_","separable_conv_2_normal_left5_9","separable_conv_2_bn_normal_left","separable_conv_2_bn_normal_righ","separable_conv_2_bn_normal_left",
    #                            "separable_conv_2_bn_normal_righ","normal_left3_9","normal_left4_9","normal_right4_9","separable_conv_2_bn_normal_left","normal_add_1_9","normal_add_2_9","normal_add_3_9",
    #                            "normal_add_4_9","normal_add_5_9","normal_concat_9","activation_141","activation_142","adjust_conv_projection_10","normal_conv_1_10","adjust_bn_10","normal_bn_1_10",
    #                            "activation_143","activation_145","activation_147","activation_149","activation_151","separable_conv_1_normal_left1_1","separable_conv_1_normal_right1_","separable_conv_1_normal_left2_1",
    #                            "separable_conv_1_normal_right2_","separable_conv_1_normal_left5_1","separable_conv_1_bn_normal_left","separable_conv_1_bn_normal_righ","separable_conv_1_bn_normal_left",
    #                            "separable_conv_1_bn_normal_righ","separable_conv_1_bn_normal_left","activation_144","activation_146","activation_148","activation_150","activation_152","separable_conv_2_normal_left1_1",
    #                            "separable_conv_2_normal_right1_","separable_conv_2_normal_left2_1","separable_conv_2_normal_right2_","separable_conv_2_normal_left5_1","separable_conv_2_bn_normal_left",
    #                            "separable_conv_2_bn_normal_righ","separable_conv_2_bn_normal_left","separable_conv_2_bn_normal_righ","normal_left3_10","normal_left4_10","normal_right4_10","separable_conv_2_bn_normal_left",
    #                            "normal_add_1_10","normal_add_2_10","normal_add_3_10","normal_add_4_10","normal_add_5_10","normal_concat_10","activation_153","activation_154","adjust_conv_projection_11",
    #                            "normal_conv_1_11","adjust_bn_11","normal_bn_1_11","activation_155","activation_157","activation_159","activation_161","activation_163","separable_conv_1_normal_left1_1",
    #                            "separable_conv_1_normal_right1_","separable_conv_1_normal_left2_1","separable_conv_1_normal_right2_","separable_conv_1_normal_left5_1","separable_conv_1_bn_normal_left",
    #                            "separable_conv_1_bn_normal_righ","separable_conv_1_bn_normal_left","separable_conv_1_bn_normal_righ","separable_conv_1_bn_normal_left","activation_156","activation_158",
    #                            "activation_160","activation_162","activation_164","separable_conv_2_normal_left1_1","separable_conv_2_normal_right1_","separable_conv_2_normal_left2_1","separable_conv_2_normal_right2_",
    #                            "separable_conv_2_normal_left5_1","separable_conv_2_bn_normal_left","separable_conv_2_bn_normal_righ","separable_conv_2_bn_normal_left","separable_conv_2_bn_normal_righ",
    #                            "normal_left3_11","normal_left4_11","normal_right4_11","separable_conv_2_bn_normal_left","normal_add_1_11","normal_add_2_11","normal_add_3_11","normal_add_4_11","normal_add_5_11",
    #                            "normal_concat_11","activation_165","activation_166","adjust_conv_projection_12","normal_conv_1_12","adjust_bn_12","normal_bn_1_12","activation_167","activation_169","activation_171",
    #                            "activation_173","activation_175","separable_conv_1_normal_left1_1","separable_conv_1_normal_right1_","separable_conv_1_normal_left2_1","separable_conv_1_normal_right2_",
    #                            "separable_conv_1_normal_left5_1","separable_conv_1_bn_normal_left","separable_conv_1_bn_normal_righ","separable_conv_1_bn_normal_left","separable_conv_1_bn_normal_righ",
    #                            "separable_conv_1_bn_normal_left","activation_168","activation_170","activation_172","activation_174","activation_176","separable_conv_2_normal_left1_1","separable_conv_2_normal_right1_",
    #                            "separable_conv_2_normal_left2_1","separable_conv_2_normal_right2_","separable_conv_2_normal_left5_1","separable_conv_2_bn_normal_left","separable_conv_2_bn_normal_righ",
    #                            "separable_conv_2_bn_normal_left","separable_conv_2_bn_normal_righ","normal_left3_12","normal_left4_12","normal_right4_12","separable_conv_2_bn_normal_left","normal_add_1_12",
    #                            "normal_add_2_12","normal_add_3_12","normal_add_4_12","normal_add_5_12","normal_concat_12","activation_178","activation_177","reduction_conv_1_reduce_12","adjust_conv_projection_reduce_1",
    #                            "reduction_bn_1_reduce_12","adjust_bn_reduce_12","activation_179","activation_181","separable_conv_1_reduction_left","separable_conv_1_reduction_1_re","separable_conv_1_bn_reduction_l",
    #                            "separable_conv_1_bn_reduction_1","activation_180","activation_182","separable_conv_2_reduction_left","separable_conv_2_reduction_1_re","separable_conv_2_bn_reduction_l",
    #                            "separable_conv_2_bn_reduction_1","activation_183","reduction_add_1_reduce_12","separable_conv_1_reduction_righ","activation_185","activation_187","separable_conv_1_bn_reduction_r",
    #                            "separable_conv_1_reduction_righ","separable_conv_1_reduction_left","activation_184","separable_conv_1_bn_reduction_r","separable_conv_1_bn_reduction_l","separable_conv_2_reduction_righ",
    #                            "activation_186","activation_188","reduction_left2_reduce_12","separable_conv_2_bn_reduction_r","separable_conv_2_reduction_righ","separable_conv_2_reduction_left",
    #                            "adjust_relu_1_13","reduction_add_2_reduce_12","reduction_left3_reduce_12","separable_conv_2_bn_reduction_r","reduction_left4_reduce_12","separable_conv_2_bn_reduction_l",
    #                            "reduction_right5_reduce_12","zero_padding2d_4","reduction_add3_reduce_12","add_4","reduction_add4_reduce_12","cropping2d_4","reduction_concat_reduce_12","adjust_avg_pool_1_13",
    #                            "adjust_avg_pool_2_13","adjust_conv_1_13","adjust_conv_2_13","activation_189","concatenate_4","normal_conv_1_13","adjust_bn_13","normal_bn_1_13","activation_190","activation_192",
    #                            "activation_194","activation_196","activation_198","separable_conv_1_normal_left1_1","separable_conv_1_normal_right1_","separable_conv_1_normal_left2_1","separable_conv_1_normal_right2_",
    #                            "separable_conv_1_normal_left5_1","separable_conv_1_bn_normal_left","separable_conv_1_bn_normal_righ","separable_conv_1_bn_normal_left","separable_conv_1_bn_normal_righ",
    #                            "separable_conv_1_bn_normal_left","activation_191","activation_193","activation_195","activation_197","activation_199","separable_conv_2_normal_left1_1","separable_conv_2_normal_right1_",
    #                            "separable_conv_2_normal_left2_1","separable_conv_2_normal_right2_","separable_conv_2_normal_left5_1","separable_conv_2_bn_normal_left","separable_conv_2_bn_normal_righ",
    #                            "separable_conv_2_bn_normal_left","separable_conv_2_bn_normal_righ","normal_left3_13","normal_left4_13","normal_right4_13","separable_conv_2_bn_normal_left","normal_add_1_13",
    #                            "normal_add_2_13","normal_add_3_13","normal_add_4_13","normal_add_5_13","normal_concat_13","activation_200","activation_201","adjust_conv_projection_14","normal_conv_1_14",
    #                            "adjust_bn_14","normal_bn_1_14","activation_202","activation_204","activation_206","activation_208","activation_210","separable_conv_1_normal_left1_1","separable_conv_1_normal_right1_",
    #                            "separable_conv_1_normal_left2_1","separable_conv_1_normal_right2_","separable_conv_1_normal_left5_1","separable_conv_1_bn_normal_left","separable_conv_1_bn_normal_righ",
    #                            "separable_conv_1_bn_normal_left","separable_conv_1_bn_normal_righ","separable_conv_1_bn_normal_left","activation_203","activation_205","activation_207","activation_209",
    #                            "activation_211","separable_conv_2_normal_left1_1","separable_conv_2_normal_right1_","separable_conv_2_normal_left2_1","separable_conv_2_normal_right2_","separable_conv_2_normal_left5_1",
    #                            "separable_conv_2_bn_normal_left","separable_conv_2_bn_normal_righ","separable_conv_2_bn_normal_left","separable_conv_2_bn_normal_righ","normal_left3_14","normal_left4_14",
    #                            "normal_right4_14","separable_conv_2_bn_normal_left","normal_add_1_14","normal_add_2_14","normal_add_3_14","normal_add_4_14","normal_add_5_14","normal_concat_14","activation_212",
    #                            "activation_213","adjust_conv_projection_15","normal_conv_1_15","adjust_bn_15","normal_bn_1_15","activation_214","activation_216","activation_218","activation_220","activation_222",
    #                            "separable_conv_1_normal_left1_1","separable_conv_1_normal_right1_","separable_conv_1_normal_left2_1","separable_conv_1_normal_right2_","separable_conv_1_normal_left5_1",
    #                            "separable_conv_1_bn_normal_left","separable_conv_1_bn_normal_righ","separable_conv_1_bn_normal_left","separable_conv_1_bn_normal_righ","separable_conv_1_bn_normal_left",
    #                            "activation_215","activation_217","activation_219","activation_221","activation_223","separable_conv_2_normal_left1_1","separable_conv_2_normal_right1_","separable_conv_2_normal_left2_1",
    #                            "separable_conv_2_normal_right2_","separable_conv_2_normal_left5_1","separable_conv_2_bn_normal_left","separable_conv_2_bn_normal_righ","separable_conv_2_bn_normal_left",
    #                            "separable_conv_2_bn_normal_righ","normal_left3_15","normal_left4_15","normal_right4_15","separable_conv_2_bn_normal_left","normal_add_1_15","normal_add_2_15","normal_add_3_15",
    #                            "normal_add_4_15","normal_add_5_15","normal_concat_15","activation_224","activation_225","adjust_conv_projection_16","normal_conv_1_16","adjust_bn_16","normal_bn_1_16",
    #                            "activation_226","activation_228","activation_230","activation_232","activation_234","separable_conv_1_normal_left1_1","separable_conv_1_normal_right1_","separable_conv_1_normal_left2_1",
    #                            "separable_conv_1_normal_right2_","separable_conv_1_normal_left5_1","separable_conv_1_bn_normal_left","separable_conv_1_bn_normal_righ","separable_conv_1_bn_normal_left",
    #                            "separable_conv_1_bn_normal_righ","separable_conv_1_bn_normal_left","activation_227","activation_229","activation_231","activation_233","activation_235","separable_conv_2_normal_left1_1",
    #                            "separable_conv_2_normal_right1_","separable_conv_2_normal_left2_1","separable_conv_2_normal_right2_","separable_conv_2_normal_left5_1","separable_conv_2_bn_normal_left",
    #                            "separable_conv_2_bn_normal_righ","separable_conv_2_bn_normal_left","separable_conv_2_bn_normal_righ","normal_left3_16","normal_left4_16","normal_right4_16","separable_conv_2_bn_normal_left",
    #                            "normal_add_1_16","normal_add_2_16","normal_add_3_16","normal_add_4_16","normal_add_5_16","normal_concat_16","activation_236","activation_237","adjust_conv_projection_17",
    #                            "normal_conv_1_17","adjust_bn_17","normal_bn_1_17","activation_238","activation_240","activation_242","activation_244","activation_246","separable_conv_1_normal_left1_1",
    #                            "separable_conv_1_normal_right1_","separable_conv_1_normal_left2_1","separable_conv_1_normal_right2_","separable_conv_1_normal_left5_1","separable_conv_1_bn_normal_left",
    #                            "separable_conv_1_bn_normal_righ","separable_conv_1_bn_normal_left","separable_conv_1_bn_normal_righ","separable_conv_1_bn_normal_left","activation_239","activation_241",
    #                            "activation_243","activation_245","activation_247","separable_conv_2_normal_left1_1","separable_conv_2_normal_right1_","separable_conv_2_normal_left2_1","separable_conv_2_normal_right2_",
    #                            "separable_conv_2_normal_left5_1","separable_conv_2_bn_normal_left","separable_conv_2_bn_normal_righ","separable_conv_2_bn_normal_left","separable_conv_2_bn_normal_righ","normal_left3_17",
    #                            "normal_left4_17","normal_right4_17","separable_conv_2_bn_normal_left","normal_add_1_17","normal_add_2_17","normal_add_3_17","normal_add_4_17","normal_add_5_17","normal_concat_17",
    #                            "activation_248","activation_249","adjust_conv_projection_18","normal_conv_1_18","adjust_bn_18","normal_bn_1_18","activation_250","activation_252","activation_254","activation_256",
    #                            "activation_258","separable_conv_1_normal_left1_1","separable_conv_1_normal_right1_","separable_conv_1_normal_left2_1","separable_conv_1_normal_right2_","separable_conv_1_normal_left5_1",
    #                            "separable_conv_1_bn_normal_left","separable_conv_1_bn_normal_righ","separable_conv_1_bn_normal_left","separable_conv_1_bn_normal_righ","separable_conv_1_bn_normal_left","activation_251",
    #                            "activation_253","activation_255","activation_257","activation_259","separable_conv_2_normal_left1_1","separable_conv_2_normal_right1_","separable_conv_2_normal_left2_1",
    #                            "separable_conv_2_normal_right2_","separable_conv_2_normal_left5_1","separable_conv_2_bn_normal_left","separable_conv_2_bn_normal_righ","separable_conv_2_bn_normal_left",
    #                            "separable_conv_2_bn_normal_righ","normal_left3_18","normal_left4_18","normal_right4_18","separable_conv_2_bn_normal_left","normal_add_1_18","normal_add_2_18","normal_add_3_18",
    #                            "normal_add_4_18","normal_add_5_18","normal_concat_18","activation_260","global_average_pooling2d_1","predictions","dense_1","dense_2"]
    #

    #trainable_layers = ["block3_pool","block4_conv1","block4_conv2","block4_conv3","block4_conv4","block4_pool","block5_conv1","block5_conv2","block5_conv3","block5_conv4","block5_pool","flatten","fc1","fc2","predictions","dense_1","dense_2"]

    # Freeze every layer named in trainable_layers; names that do not exist in
    # this architecture are skipped with a notice.
    for layer_name in trainable_layers:
        try:
            model.get_layer(layer_name).trainable = False
        except ValueError:
            print("Layer does not exist: ", layer_name)

    sgd = SGD(lr=1e-6, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
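A minimal usage sketch, not part of the original snippet: assuming `model` is the compiled network returned above and that `x_train`/`y_train` are hypothetical NumPy arrays matching its input shape and class count, a short fine-tuning run could look like this:

import numpy as np

# Hypothetical data: 8 RGB images at 224x224 with 10 one-hot encoded classes.
x_train = np.random.rand(8, 224, 224, 3).astype('float32')
y_train = np.eye(10)[np.random.randint(0, 10, size=8)]

model.fit(x_train, y_train, batch_size=4, epochs=1, validation_split=0.25)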
Example #14
0
# Create models (make sure weights are already downloaded!).
# This takes a while...
print('Loading Models')
resnet = resnet50.ResNet50(
    weights='weights/resnet50_weights_tf_dim_ordering_tf_kernels.h5')
xc = xception.Xception(
    weights='weights/xception_weights_tf_dim_ordering_tf_kernels.h5')
v19 = vgg19.VGG19(
    weights='weights/vgg19_weights_tf_dim_ordering_tf_kernels.h5')
ic3 = inception_v3.InceptionV3(
    weights='weights/inception_v3_weights_tf_dim_ordering_tf_kernels.h5')
ic_resnet = inception_resnet_v2.InceptionResNetV2(
    weights='weights/inception_resnet_v2_weights_tf_dim_ordering_tf_kernels.h5'
)
mobile = mobilenet.MobileNet(weights='weights/mobilenet_1_0_224_tf.h5')
nn_large = nasnet.NASNetLarge(weights='weights/NASNet-large.h5')
print('Models Loaded')
print('')

#############################################
# RUN FRAMES THROUGH IMAGE RECOGNITION MODELS
#############################################

# Create dictionary to hold results for each model.
resnet_results = dict()
xc_results = dict()
v19_results = dict()
ic3_results = dict()
ic_resnet_results = dict()
mobile_results = dict()
nn_large_results = dict()
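The loop that actually pushes frames through the models is not shown above; as a hedged sketch (assuming a hypothetical frame file, and that each model gets its own input size and preprocessing), one frame could be classified with the ResNet50 model and its decoded top-3 predictions stored like this:

import numpy as np
from keras.preprocessing import image  # already imported in the full script

frame_path = 'frames/frame_0001.jpg'  # hypothetical frame path
img = image.load_img(frame_path, target_size=(224, 224))
x = resnet50.preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
resnet_results[frame_path] = resnet50.decode_predictions(resnet.predict(x), top=3)[0]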
Example #15
0
def save_bottlebeck_features(model_name):

    if model_name == 'resnet50':
        model = resnet50.ResNet50(weights='imagenet',
                                  include_top=False,
                                  pooling='avg')
        feat_output = [
            'E:/Hongming/projects/tcga-bladder-mutationburden/feature_output/P_CN_20X/2)resnet50/',
            'E:/Hongming/projects/tcga-bladder-mutationburden/feature_output/P_CN_20X/2)resnet50/'
        ]

        # 2048 dimensional features
        # pooling: 1) None: output is 16x16x2048, 2) avg: 1x1x2048, 3) max: 1x1x2048
        #base_model=resnet50.ResNet50(input_shape=(img_height,img_width,3),weights='imagenet', include_top=False)
        #model = Model(inputs=base_model.input, outputs=base_model.get_layer('activation_25').output)
    elif model_name == 'nasnet_large':
        model = nasnet.NASNetLarge(input_shape=(img_height, img_width, 3),
                                   weights='imagenet',
                                   include_top=False,
                                   pooling='avg')
        #4032 dimensional features
        # NOTE: feat_output is not defined in this branch (nor in the
        # inceptionresnetv2, densenet, and vgg19 branches), so saving features
        # below would raise a NameError unless it is set elsewhere.
    elif model_name == 'xception':
        model = xception.Xception(input_shape=(img_height, img_width, 3),
                                  weights='imagenet',
                                  include_top=False,
                                  pooling='avg')
        feat_output = [
            'E:/Hongming/projects/tcga-bladder-mutationburden/feature_output/P_CN_20X/4)xception/',
            'E:/Hongming/projects/tcga-bladder-mutationburden/feature_output/P_CN_20X/4)xception/'
        ]

        #2048 dimensional features
    elif model_name == 'inceptionv3':
        model = inception_v3.InceptionV3(input_shape=(img_height, img_width,
                                                      3),
                                         weights='imagenet',
                                         include_top=False,
                                         pooling='avg')
        feat_output = [
            'E:/Hongming/projects/tcga-bladder-mutationburden/feature_output/P_CN_20X/5)inceptionv3/',
            'E:/Hongming/projects/tcga-bladder-mutationburden/feature_output/P_CN_20X/5)inceptionv3/'
        ]
        #2048 dimensional features
    elif model_name == 'inceptionresnetv2':
        model = inception_resnet_v2.InceptionResNetV2(input_shape=(img_height,
                                                                   img_width,
                                                                   3),
                                                      weights='imagenet',
                                                      include_top=False,
                                                      pooling='avg')
        #1536 dimensional features
    elif model_name == 'densenet':
        model = densenet.DenseNet201(input_shape=(img_height, img_width, 3),
                                     weights='imagenet',
                                     include_top=False,
                                     pooling='avg')
        # 1920 dimensional features
    else:
        model = vgg19.VGG19(weights='imagenet',
                            include_top=False,
                            pooling='avg')
        # 512 dimensional features
        #base_model=vgg19.VGG19(input_shape=(img_height,img_width,3),weights='imagenet', include_top=False)
        #model=Model(inputs=base_model.input,outputs=base_model.get_layer('block4_pool').output)

    #for i,layer in enumerate(model.layers):
    #    print(i,layer.name)

    #print(model.summary())

    for ind in range(1, len(img_path)):
        path_ind = img_path[ind]
        #path_split=path_ind.split("/")
        images = os.listdir(path_ind)
        for image_name in images:
            if '.svs' in image_name:
                patient_id = image_name[0:23]
                image_features = []
                image_names = []

                ss = time.time()
                # NOTE: hard-coded patient id overrides the value parsed from
                # the file name above (likely a debugging leftover).
                patient_id = 'TCGA-2F-A9KO'
                #patches=os.listdir(train_data_dir[ind]+patient_id+'*.png')
                patches = glob.glob(train_data_dir[0] + patient_id + '*.png')

                for patch_name in patches:
                    patch_split = patch_name.split("\\")
                    img = image.load_img(patch_name,
                                         target_size=(img_height, img_width))
                    # convert image to numpy array
                    x = image.img_to_array(img)

                    # the image is now an array of shape (img_height, img_width, 3);
                    # expand it to (1, img_height, img_width, 3) since the model expects a batch
                    x = np.expand_dims(x, axis=0)
                    #imshow(np.uint8(x[0,:,:,:]))

                    if model_name == 'resnet50':
                        x = resnet50.preprocess_input(x)
                    elif model_name == 'nasnet_large':
                        x = nasnet.preprocess_input(x)
                    elif model_name == 'xception':
                        x = xception.preprocess_input(x)
                    elif model_name == 'inceptionv3':
                        x = inception_v3.preprocess_input(x)
                    elif model_name == 'inceptionresnetv2':
                        x = inception_resnet_v2.preprocess_input(x)
                    elif model_name == 'densenet':
                        x = densenet.preprocess_input(x)
                    else:
                        x = vgg19.preprocess_input(x)

                    # extract the features
                    features = model.predict(x)[0]
                    #features=np.mean(features,axis=(0,1))

                    image_features.append(features)
                    image_names.append(patch_split[1])

                se = time.time() - ss
                print(se)
                if save_features:
                    scipy.io.savemat(feat_output[ind] + patient_id +
                                     '_feat.mat',
                                     mdict={
                                         'image_features': image_features,
                                         'image_names': image_names
                                     })
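A hedged usage sketch: the function above relies on globals (img_path, train_data_dir, img_height, img_width, save_features, feat_output) that are assumed to be defined earlier in the full script; a call could look like this:

# Site-specific lists img_path and train_data_dir must point at the slide and
# patch directories before calling; they are left out of this sketch.
img_height, img_width = 224, 224      # assumed backbone input size
save_features = True
save_bottlebeck_features('resnet50')  # extracts 2048-d features per patch, one .mat per patient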
Example #16
0
def getFinalModel(num_outputs=num_outputs,
                  lr=learning_rate,
                  include_vgg_top=include_vgg_top,
                  use_vgg16=use_vgg16):
    if use_vgg16:
        modelID = 'CNN_VGG16'
        inp = (224, 224, 3)
        modelPackage = vgg16
        margins = (8, 8, 48, 48)
        Target_Frame_Shape = (240, 320, 3)
        cnn_model = vgg16.VGG16(weights='imagenet',
                                input_shape=inp,
                                include_top=include_vgg_top)
    elif True:
        inp = (299, 299, 3)
        modelPackage = inception_v3
        modelID = 'CNN_InceptionV3'
        margins = (0, 1, 51, 50)
        Target_Frame_Shape = (300, 400, 3)
        cnn_model = inception_v3.InceptionV3(weights='imagenet',
                                             input_shape=inp,
                                             include_top=include_vgg_top)
    else:
        inp = (331, 331, 3)
        modelPackage = nasnet
        modelID = 'CNN_NASNetLarge'
        margins = (14, 15, 74, 75)
        Target_Frame_Shape = (360, 480, 3)
        cnn_model = nasnet.NASNetLarge(weights='imagenet',
                                       input_shape=inp,
                                       include_top=include_vgg_top)

    def preprocess_input(imagePath):
        return preprocess_input_for_model(imagePath, Target_Frame_Shape,
                                          margins, modelPackage)

    if include_vgg_top:
        modelID = modelID + '_inc_top'
        #cnn_model = addDropout(cnn_model)
        cnn_model.layers.pop()
        cnn_model.outputs = [cnn_model.layers[-1].output]
        cnn_model.output_layers = [cnn_model.layers[-1]]
        cnn_model.layers[-1].outbound_nodes = []
        for layer in cnn_model.layers:
            layer.trainable = False
        x = cnn_model.layers[-1].output
        #x = Dropout(0.25, name = 'dropout3_025')(x)
        x = Dense(1024, activation='tanh', name='fc1024')(x)
        #x = Dropout(0.25, name = 'dropout_025')(x)
        #x = Dense(num_outputs, name = 'fc3')(x)
        cnn_model = Model(inputs=cnn_model.input, outputs=x)
    """
    """

    #cnn_model.summary(), shape=(inp[0], inp[1], inp[2])
    cnn = Sequential()
    cnn.add(cnn_model)
    if not include_vgg_top:
        cnn.add(Flatten())
    cnn.add(Dense(num_outputs))

    modelID = modelID + '_output%d' % num_outputs

    modelID = modelID + '_BatchSize%d' % train_batch_size
    modelID = modelID + '_inEpochs%d' % in_epochs
    modelID = modelID + '_outEpochs%d' % out_epochs

    #for layer in rnn.layers[:1]:
    #    layer.trainable = False
    adam = Adam(lr=lr)
    modelID = modelID + '_AdamOpt_lr-%f' % lr
    cnn.compile(
        optimizer=adam,
        loss='mean_absolute_error')  #'mean_squared_error', metrics=['mae'])#
    modelID = modelID + '_%s' % now()[:-7].replace(' ', '_').replace(':', '-')
    return cnn_model, cnn, modelID, preprocess_input
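A minimal usage sketch (assuming preprocess_input_for_model returns a single preprocessed image array for the hypothetical frame path below):

import numpy as np

cnn_model, cnn, modelID, preprocess = getFinalModel()
x = preprocess('frames/frame_0001.jpg')        # hypothetical frame path
pred = cnn.predict(np.expand_dims(x, axis=0))  # num_outputs regression values per frame
print(modelID, pred.shape)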