Example #1
def test_inceptionresnetv2_variable_input_channels():
    global_image_data_format = K.image_data_format()

    K.set_image_data_format('channels_first')
    input_shape = (1, None, None)
    model = applications.InceptionResNetV2(weights=None,
                                           include_top=False,
                                           input_shape=input_shape)
    assert model.output_shape == (None, 1536, None, None)
    input_shape = (4, None, None)
    model = applications.InceptionResNetV2(weights=None,
                                           include_top=False,
                                           input_shape=input_shape)
    assert model.output_shape == (None, 1536, None, None)

    K.set_image_data_format('channels_last')
    input_shape = (None, None, 1)
    model = applications.InceptionResNetV2(weights=None,
                                           include_top=False,
                                           input_shape=input_shape)
    assert model.output_shape == (None, None, None, 1536)
    input_shape = (None, None, 4)
    model = applications.InceptionResNetV2(weights=None,
                                           include_top=False,
                                           input_shape=input_shape)
    assert model.output_shape == (None, None, None, 1536)

    K.set_image_data_format(global_image_data_format)
Example #2
    def model_(self, number=0):
        print('=' * 100)
        print('Load Model')
        print('Model name: ', self.save_name)
        print('-' * 100)
        if self.self_training:
            self.model = models.load_model(self.model_dir + '/' +
                                           self.save_name + '.h5')
            adam = op.Adam(lr=0.00001)
            self.model.compile(optimizer=adam,
                               loss='categorical_crossentropy',
                               metrics=['acc'])
        else:
            a = app.InceptionResNetV2(include_top=False,
                                      weights='imagenet',
                                      input_shape=(224, 224, 3),
                                      pooling='avg',
                                      classes=10)
            x = a.output
            output = layers.Dense(10, activation='softmax')(x)
            self.model = models.Model(a.input,
                                      output,
                                      name='Pretrain_inception_resnet_v2')

            adam = op.Adam(lr=0.0001)
            self.model.compile(optimizer=adam,
                               loss='categorical_crossentropy',
                               metrics=['acc'])
        print('Complete')
        print('=' * 100)
Example #3
    def build(width, height, depth, classes):
        #        model = Sequential()
        inputShape = (height, width, depth)
        #        chanDim = -1

        if K.image_data_format() == "channels_first":
            inputShape = (depth, height, width)


#            chanDim = 1
#        model.add(Conv2D(64, (3, 3), padding="same",
#            input_shape=inputShape))

        base_model = applications.InceptionResNetV2(
            weights="imagenet", include_top=False,
            input_shape=inputShape)  # pretrained ImageNet base; the top classifier is replaced below
        print('base_model', base_model.summary())

        #        for layer in base_model.layers[:15]: layer.trainable = False  # freeze the first 15 layers of the pretrained network

        top_model = Sequential()  # custom top network
        top_model.add(
            Flatten(input_shape=base_model.output_shape[1:]))  # flatten the pretrained network's output
        top_model.add(Dense(2, activation='relu'))  # fully connected layer
        top_model.add(Dropout(0.5))  # dropout probability 0.5
        top_model.add(Dense(classes, activation='softmax'))  # output layer
        print('top_model', top_model.summary())
        model = Model(inputs=base_model.input,
                      outputs=top_model(base_model.output))
        return model
Example #4
def base_network(network='InceptionV3'):
    if network == 'InceptionV3':
        base_model = applications.InceptionV3(weights='imagenet',
                                              include_top=False)
    elif network == 'VGG16':
        base_model = applications.VGG16(weights='imagenet', include_top=False)
    elif network == 'VGG19':
        base_model = applications.VGG19(weights='imagenet', include_top=False)
    elif network == 'ResNet50':
        base_model = applications.ResNet50(weights='imagenet',
                                           include_top=False)
    elif network == 'InceptionResNetV2':
        base_model = applications.InceptionResNetV2(weights='imagenet',
                                                    include_top=False)
    elif network == 'MobileNet':
        base_model = applications.MobileNet(weights='imagenet',
                                            include_top=False)
    elif network == 'DenseNet121':
        base_model = applications.DenseNet121(weights='imagenet',
                                              include_top=False)
    else:
        print('Wrong Model selected.')
        return None

    return base_model
Example #5
def save_model_to_file(filename,
                       architecture,
                       weights='imagenet',
                       input_shape=(299, 299, 3)):

    base_path = os.getcwd()
    model_path = os.path.join(base_path, filename)

    # Create and save network file
    if not os.path.exists(model_path):
        # Initialise Keras object
        if architecture == 'inceptionv3':
            mymodel = applications.InceptionV3(
                weights=weights, input_tensor=Input(shape=input_shape))
        elif architecture == 'resnet152v2':
            mymodel = applications.ResNet152V2(
                weights=weights, input_tensor=Input(shape=input_shape))
        elif architecture == 'inceptionresnetv2':
            mymodel = applications.InceptionResNetV2(
                weights=weights, input_tensor=Input(shape=input_shape))
        else:
            print('Invalid architecture')
            return

        mymodel.save(os.path.join(base_path, filename))
Example #6
def save_bottlebeck_features():
    train_datagen = ImageDataGenerator(rescale=1. / 255)

    test_datagen = ImageDataGenerator(rescale=1. / 255)

    # build the InceptionResNetV2 network
    model = applications.InceptionResNetV2(include_top=False,
                                           weights='imagenet')

    train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode=None,
        shuffle=False)
    bottleneck_features_train = model.predict_generator(
        train_generator,
        aug_factor * nb_classes * nb_train_samples // batch_size)
    train_labels = np.tile(np.repeat(np.arange(nb_classes), nb_train_samples),
                           aug_factor)
    np.savez(bn_train_path, data=bottleneck_features_train, label=train_labels)

    test_generator = test_datagen.flow_from_directory(validation_data_dir,
                                                      target_size=(img_width,
                                                                   img_height),
                                                      batch_size=batch_size,
                                                      class_mode=None,
                                                      shuffle=False)
    bottleneck_features_validation = model.predict_generator(
        test_generator, nb_classes * nb_validation_samples // batch_size)
    validation_labels = np.repeat(np.arange(nb_classes), nb_validation_samples)
    np.savez(bn_validation_path,
             data=bottleneck_features_validation,
             label=validation_labels)
Example #7
def models_factory(model_type, image_size):

    if model_type == "vgg16":
        base_model = applications.VGG16(weights='imagenet',
                                        include_top=False,
                                        input_shape=(image_size[0],
                                                     image_size[1], 3))
    elif model_type == "vgg19":
        base_model = applications.VGG19(weights='imagenet',
                                        include_top=False,
                                        input_shape=(image_size[0],
                                                     image_size[1], 3))
    elif model_type == "resnet50":
        base_model = applications.ResNet50(weights='imagenet',
                                           include_top=False,
                                           input_shape=(image_size[0],
                                                        image_size[1], 3))
    elif model_type == "inceptionv3":
        base_model = applications.InceptionV3(weights='imagenet',
                                              include_top=False,
                                              input_shape=(image_size[0],
                                                           image_size[1], 3))
    elif model_type == "xception":
        base_model = applications.Xception(weights='imagenet',
                                           include_top=False,
                                           input_shape=(image_size[0],
                                                        image_size[1], 3))
    elif model_type == "mobilenet":
        base_model = applications.MobileNet(weights='imagenet',
                                            include_top=False,
                                            input_shape=(image_size[0],
                                                         image_size[1], 3))
    elif model_type == "inceptionresnetv2":
        base_model = applications.InceptionResNetV2(weights='imagenet',
                                                    include_top=False,
                                                    input_shape=(image_size[0],
                                                                 image_size[1],
                                                                 3))
    elif model_type == "nasnet":
        base_model = applications.nasnet.NASNetLarge(
            weights='imagenet',
            include_top=False,
            input_shape=(image_size[0], image_size[1], 3))

    for layer in base_model.layers:
        layer.trainable = False

    top_model = Sequential()
    top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
    top_model.add(
        Dense(1024, kernel_initializer='glorot_uniform', activation='relu'))
    top_model.add(
        Dense(1024, kernel_initializer='glorot_uniform', activation='relu'))
    top_model.add(Dense(1, activation='sigmoid'))
    model = Model(inputs=base_model.input, outputs=top_model(base_model.output))

    return model, base_model
Example #8
def get_imagenet_architecture(architecture, variant, size, alpha, output_layer, include_top=False, weights='imagenet'):
    from keras import applications, Model

    if include_top:
        assert output_layer == 'last'

    if size == 'auto':
        size = get_image_size(architecture, variant, size)

    shape = (size, size, 3)

    if architecture == 'densenet':
        if variant == 'auto':
            variant = 'densenet-121'
        if variant == 'densenet-121':
            model = applications.DenseNet121(weights=weights, include_top=include_top, input_shape=shape)
        elif variant == 'densenet-169':
            model = applications.DenseNet169(weights=weights, include_top=include_top, input_shape=shape)
        elif variant == 'densenet-201':
            model = applications.DenseNet201(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'inception-resnet-v2':
        model = applications.InceptionResNetV2(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'mobilenet':
        model = applications.MobileNet(weights=weights, include_top=include_top, input_shape=shape, alpha=alpha)
    elif architecture == 'mobilenet-v2':
        model = applications.MobileNetV2(weights=weights, include_top=include_top, input_shape=shape, alpha=alpha)
    elif architecture == 'nasnet':
        if variant == 'auto':
            variant = 'large'
        if variant == 'large':
            model = applications.NASNetLarge(weights=weights, include_top=include_top, input_shape=shape)
        else:
            model = applications.NASNetMobile(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'resnet-50':
        model = applications.ResNet50(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'vgg-16':
        model = applications.VGG16(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'vgg-19':
        model = applications.VGG19(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'xception':
        model = applications.Xception(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'inception-v3':
        model = applications.InceptionV3(weights=weights, include_top=include_top, input_shape=shape)

    if output_layer != 'last':
        try:
            if isinstance(output_layer, int):
                layer = model.layers[output_layer]
            else:
                layer = model.get_layer(output_layer)
        except Exception:
            raise VergeMLError('layer not found: {}'.format(output_layer))
        model = Model(inputs=model.input, outputs=layer.output)

    return model
Example #9
def create_InceptionResNetV2(image_size, num_class):
    resnet_conv = applications.InceptionResNetV2(weights='imagenet',
                                                 include_top=False,
                                                 input_shape=(image_size,
                                                              image_size, 3))

    model = models.Sequential()
    model.add(resnet_conv)
    model.add(layers.GlobalAveragePooling2D())
    model.add(layers.Dense(num_class, activation='softmax'))
    return model
    pass
Example #10
 def model_(self):
     print('=' * 50)
     print('Load Model')
     print('Model name: ', self.save_name)
     print('-' * 50)
     self.model = app.InceptionResNetV2(include_top=True,
                                        weights=None,
                                        input_shape=(224, 224, 1),
                                        classes=10)
     self.model.compile(optimizer=self.optimizer,
                        loss='categorical_crossentropy',
                        metrics=['acc'])
     print('Complete')
     print('=' * 50)
Example #11
def build_model(X_train, X_val, y_train, y_val):

    base_model = applications.InceptionResNetV2(weights="imagenet", include_top=False, input_shape=(IM_WIDTH, IM_HEIGHT, 3))
    # Data Augmentation

    train_datagen = ImageDataGenerator(
        rescale=1. / 255,
        horizontal_flip=True,
        shear_range=0.2,
        zoom_range=0.2,
        rotation_range=30,
        width_shift_range=0.2,
        height_shift_range=0.2

    )

    test_datagen = ImageDataGenerator(
        rescale=1. / 255,
        horizontal_flip=True,
        shear_range=0.2,
        zoom_range=0.2,
        rotation_range=30,
        width_shift_range=0.2,
        height_shift_range=0.2
    )

    train_generator = train_datagen.flow(
        X_train, y_train)

    validation_generator = test_datagen.flow(
        X_val, y_val
    )
    model = add_new_last_layer(base_model, y_train.shape[1])

    # transfer learning
    model = fine_tune(model)

    # Train model
    history = model.fit_generator(
        train_generator,
        steps_per_epoch=nb_train_samples / BATCH_SIZE,
        epochs=EPOCH,
        shuffle=True,
        validation_data=validation_generator,
        validation_steps=nb_validation_samples / BATCH_SIZE,
        callbacks=add_callback()
    )
Example #12
def model_app(arch, input_tensor):
    """Loads the appropriate convolutional neural network (CNN) model
      Args:
        arch: String key for model to be loaded.
        input_tensor: Keras tensor to use as image input for the model.
      Returns:
        model: The specified Keras Model instance with ImageNet weights loaded and without the top classification layer.
      """
    # function that loads the appropriate model
    if arch == 'Xception':
        model = applications.Xception(weights='imagenet', include_top=False, input_tensor=input_tensor)
        print('Xception loaded')
    elif arch == 'VGG16':
        model = applications.VGG16(weights='imagenet', include_top=False, input_tensor=input_tensor)
        print('VGG16 loaded')
    elif arch == 'VGG19':
        model = applications.VGG19(weights='imagenet', include_top=False, input_tensor=input_tensor)
        print('VGG19 loaded')
    elif arch == 'ResNet50':
        model = applications.ResNet50(weights='imagenet', include_top=False, input_tensor=input_tensor)
        print('ResNet50 loaded')
    elif arch == 'InceptionV3':
        model = applications.InceptionV3(weights='imagenet', include_top=False, input_tensor=input_tensor)
        print('InceptionV3 loaded')
    elif arch == 'InceptionResNetV2':
        model = applications.InceptionResNetV2(weights='imagenet', include_top=False, input_tensor=input_tensor)
        print('InceptionResNetV2 loaded')
    elif arch == 'MobileNet':
        model = applications.MobileNet(input_shape=(224, 224, 3), weights='imagenet', include_top=False,
                                       input_tensor=input_tensor)
        print('MobileNet loaded')
    elif arch == 'DenseNet121':
        model = applications.DenseNet121(weights='imagenet', include_top=False, input_tensor=input_tensor)
        print('DenseNet121 loaded')
    elif arch == 'NASNetLarge':
        model = applications.NASNetLarge(weights='imagenet', include_top=False, input_tensor=input_tensor)
        print('NASNetLarge loaded')
    elif arch == 'MobileNetV2':
        model = applications.MobileNetV2(input_shape=(224, 224, 3), weights='imagenet', include_top=False,
                                         input_tensor=input_tensor)
        print('MobileNetV2 loaded')
    else:
        print('Invalid model selected')
        model = False

    return model
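
A minimal usage sketch for model_app follows; the 299x299 input size and the 5-class head are illustrative assumptions, not part of the original snippet:

from keras.layers import Input, GlobalAveragePooling2D, Dense
from keras.models import Model

input_tensor = Input(shape=(299, 299, 3))  # assumed input size
base = model_app('InceptionResNetV2', input_tensor)  # returns the headless backbone
x = GlobalAveragePooling2D()(base.output)
out = Dense(5, activation='softmax')(x)  # hypothetical 5-class classifier head
clf = Model(inputs=base.input, outputs=out)
clf.summary()
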
Example #13
    def create_model(self):
        base_model = applications.InceptionResNetV2(weights='imagenet',
                                                    include_top=False,
                                                    input_shape=(self.ROWS,
                                                                 self.COLS, 3))

        for layer in base_model.layers[:5]:
            layer.trainable = False

        add_model = Sequential()
        add_model.add(base_model)
        add_model.add(Flatten())
        add_model.add(Dropout(0.2))
        add_model.add(Dense(1024, activation='relu'))
        add_model.add(Dropout(0.3))
        add_model.add(Dense(1024, activation='relu'))
        add_model.add(Dense(2, activation='softmax'))

        self.model = add_model

        self.model.compile(loss='categorical_crossentropy',
                           optimizer=optimizers.SGD(lr=1e-3, momentum=0.9),
                           metrics=['accuracy'])
        self.model.summary()
Example #14
def test_inceptionresnetv2():
    model = applications.InceptionResNetV2(weights=None)
    assert model.output_shape == (None, 1000)
Example #15
def train(train_path,
          val_path,
          test_path,
          batch_size=32,
          epochs=50,
          network='InceptionResNetV2',
          data_augmentation=True,
          mode='finetune',
          optimizer='Adadelta',
          fc=1,
          classes=5,
          gpu=1):
    '''
    Inputs:
        train_path: data path for train set (data should be stored like train/DR, train/Normal) 
        val_path: data path for validation set
        test_path: data path for test set
        batch_size: data sizes per step
        epochs: loop counts over whole train set
        network: {
            'InceptionResNetV2': fine-tune mode will train last 2 inception blocks
            'DenseNet201': fine-tune mode will train last Dense block
            'InceptionV3': fine-tune mode will train last 2 inception blocks
            'Xception'
            'NASNet'
            'MobileNetV2'
            'ResNet50': according to https://arxiv.org/pdf/1805.08974.pdf, arguably the most suitable for transfer learning
        }
        data_augmentation: whether to do data augmentation or not
        mode: {
            'retrain': randomly initialize all layers and retrain the whole model
            'finetune': train specified layers
            'transfer': train only the fc layer(s)
        }
        optimizer: {
            'Adadelta'
            'Adam'
            'RMSprop'
        }
        fc: {
            1: only one fc layer at last
            2: include two fc layers at last
        }
        classes: category counts
    '''
    if mode == 'retrain':
        include_top = False
        weights = None
        pooling = 'avg'
    else:
        include_top = False
        weights = 'imagenet'
        pooling = 'avg'

    if network == 'DenseNet201':
        from keras.applications.densenet import preprocess_input
        img_width, img_height = 224, 224
        base_model = applications.DenseNet201(include_top=include_top,
                                              weights=weights,
                                              pooling=pooling)
        # train last Dense Block
        if mode == 'finetune':
            trainable = False
            for layer in base_model.layers:
                if layer.name == 'conv5_block1_0_bn':
                    trainable = True
                layer.trainable = trainable

    if network == 'Xception':
        from keras.applications.xception import preprocess_input
        img_width, img_height = 299, 299
        base_model = applications.Xception(include_top=include_top,
                                           weights=weights,
                                           pooling=pooling)

    if network == 'InceptionV3':
        from keras.applications.inception_v3 import preprocess_input
        img_width, img_height = 299, 299
        base_model = applications.InceptionV3(include_top=include_top,
                                              weights=weights,
                                              pooling=pooling)
        # train top 2 inception blocks
        if mode == 'finetune':
            for layer in base_model.layers[:249]:
                layer.trainable = False
            for layer in base_model.layers[249:]:
                #print(layer.name)
                layer.trainable = True

    if network == 'InceptionResNetV2':
        from keras.applications.inception_resnet_v2 import preprocess_input
        img_width, img_height = 299, 299
        base_model = applications.InceptionResNetV2(include_top=include_top,
                                                    weights=weights,
                                                    pooling=pooling)
        # train the top inception block
        if mode == 'finetune':
            trainable = True
            for layer in base_model.layers:
                #print(layer.name)
                if layer.name == 'conv2d_9':
                    trainable = False
                if layer.name == 'conv2d_201':
                    trainable = True
                layer.trainable = trainable

    if network == 'NASNet':
        from keras.applications.nasnet import preprocess_input
        img_width, img_height = 331, 331
        base_model = applications.NASNetLarge(include_top=include_top,
                                              weights=weights,
                                              pooling=pooling)

    if network == 'MobileNetV2':
        from keras.applications.mobilenetv2 import preprocess_input
        img_width, img_height = 224, 224
        base_model = applications.MobileNetV2(include_top=include_top,
                                              weights=weights,
                                              pooling=pooling)

    if network == 'ResNet50':
        from keras.applications.resnet50 import preprocess_input
        img_width, img_height = 224, 224
        base_model = applications.ResNet50(include_top=include_top,
                                           weights=weights,
                                           pooling=pooling)

    bottleneck = base_model.output
    if fc == 2:
        bottleneck = Dense(
            512,
            activation='relu',
            kernel_regularizer=keras.regularizers.l2(l=0.001))(bottleneck)
    predictions = Dense(
        classes,
        kernel_regularizer=keras.regularizers.l2(l=0.001),
        activation='softmax',
        bias_regularizer=keras.regularizers.l2(l=0.001))(bottleneck)
    model = Model(inputs=base_model.input, outputs=predictions)

    if mode == 'transfer':
        # train only the top layers (which were randomly initialized)
        # freeze all convolutional layers
        for layer in base_model.layers:
            layer.trainable = False

    if mode == 'retrain':
        # train a complete model
        for layer in base_model.layers:
            layer.trainable = True

    if optimizer == 'Adadelta':
        opt = optimizers.Adadelta()
    if optimizer == 'Adam':
        opt = optimizers.Adam()
    if optimizer == 'RMSprop':
        opt = optimizers.RMSprop(lr=0.005, rho=0.9, epsilon=1.0, decay=0.94)

    if gpu > 1:
        batch_size *= gpu
        model = multi_gpu_model(model, gpus=gpu)

    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    if data_augmentation:
        # Initialize the train and test generators with data Augumentation
        train_datagen = ImageDataGenerator(
            preprocessing_function=preprocess_input,
            horizontal_flip=True,
            fill_mode="nearest",
            zoom_range=0.3,
            width_shift_range=0.3,
            height_shift_range=0.3,
            rotation_range=30)
        val_datagen = ImageDataGenerator(
            preprocessing_function=preprocess_input)

    else:
        train_datagen = ImageDataGenerator(
            preprocessing_function=preprocess_input)
        val_datagen = ImageDataGenerator(
            preprocessing_function=preprocess_input)

    train_generator = train_datagen.flow_from_directory(
        train_path,
        target_size=(img_height, img_width),
        batch_size=batch_size,
        class_mode="categorical")

    validation_generator = val_datagen.flow_from_directory(
        val_path,
        target_size=(img_height, img_width),
        class_mode="categorical")

    test_generator = val_datagen.flow_from_directory(test_path,
                                                     target_size=(img_height,
                                                                  img_width),
                                                     class_mode="categorical")

    checkpoint = ModelCheckpoint("{}_{}_{}.h5".format(network, mode,
                                                      optimizer),
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True,
                                 save_weights_only=False,
                                 mode='auto',
                                 period=1)
    early = EarlyStopping(monitor='val_acc',
                          min_delta=0,
                          patience=10,
                          verbose=1,
                          mode='auto')

    model.fit_generator(train_generator,
                        epochs=epochs,
                        validation_data=validation_generator,
                        callbacks=[checkpoint, early])

    score = model.evaluate_generator(test_generator)

    print(score)
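
A hedged invocation sketch for train(); the directory layout and settings below are placeholders, not values taken from the original:

# assumes train/val/test directories each contain one sub-folder per class
train(train_path='data/train',
      val_path='data/val',
      test_path='data/test',
      batch_size=32,
      epochs=50,
      network='InceptionResNetV2',
      data_augmentation=True,
      mode='finetune',
      optimizer='Adadelta',
      fc=1,
      classes=5,
      gpu=1)
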
Example #16
 def target(queue, input_shape):
     model = applications.InceptionResNetV2(weights=None,
                                            include_top=False,
                                            input_shape=input_shape)
     queue.put(model.output_shape)
Example #17
 def target(queue):
     model = applications.InceptionResNetV2(weights=None)
     queue.put(model.output_shape)
Example #18
def creat_net(train_generator, validation_generator, batch_size, image_lengh,
              image_width):
    base_model = applications.InceptionResNetV2(weights='imagenet',
                                                include_top=False,
                                                input_shape=(image_width,
                                                             image_lengh, 3))
    for layer in base_model.layers:
        layer.trainable = False
    x = base_model.output
    x = tf.keras.layers.Conv2D(128,
                               kernel_size=(3, 3),
                               strides=(1, 1),
                               activation='relu')(x)
    x = tf.keras.layers.Dropout(0.2)(x)
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    x = tf.keras.layers.Dropout(0.2)(x)
    x = tf.keras.layers.Dense(4, activation='softmax')(x)
    model = Model(base_model.layers[0].input, x)
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    # save the best model
    filepath = './模型/InceptionResNetV2_weights-improvement-{epoch:02d}-{val_accuracy:.2f}.hdf5'
    checkpoint = callbacks.ModelCheckpoint(filepath,
                                           monitor='val_accuracy',
                                           verbose=1,
                                           save_best_only=True,
                                           mode='max')
    early = callbacks.EarlyStopping(monitor='val_loss',
                                    min_delta=0,
                                    patience=0,
                                    verbose=0,
                                    mode='auto',
                                    baseline=None,
                                    restore_best_weights=False)
    model.fit_generator(train_generator,
                        epochs=30,
                        steps_per_epoch=1707 // batch_size,
                        validation_data=validation_generator,
                        validation_steps=264 // batch_size,
                        callbacks=[checkpoint, early])

    # plot the loss and accuracy curves
    loss = model.history.history['loss']
    val_loss = model.history.history['val_loss']
    epoches = range(1, len(loss) + 1)
    acc = model.history.history['accuracy']
    val_acc = model.history.history['val_accuracy']
    plt.subplot(121)
    plt.plot(epoches, loss, 'bo', label='training_loss')
    plt.plot(epoches, val_loss, 'r', label='validation_loss')
    plt.xlabel('epoches')
    plt.ylabel('loss')
    plt.title('losses of train and val')
    plt.legend()
    plt.subplot(122)
    plt.plot(epoches, acc, 'bo', label='training_acc')
    plt.plot(epoches, val_acc, 'r', label='validation_acc')
    plt.xlabel('epoches')
    plt.ylabel('acc')
    plt.title('accuracy of train and val')
    plt.legend()
    plt.show()
Example #19
 def model_fn():
     return applications.InceptionResNetV2(weights=None,
                                           include_top=False,
                                           pooling='avg')
Example #20
y_test = np.load("y_test.npy")

# Normalizing the images :--->
X_train = X_train/255
X_test = X_test/255
print("Done Normalizing!!!")

# Converting the class labels into binary class matrices
y_train = keras.utils.to_categorical(y_train,num_classes=6)
y_test = keras.utils.to_categorical(y_test,num_classes=6)

# Splitting 15% of training dataset into CV dataset
X_train, X_CV, y_train, y_CV = train_test_split(X_train, y_train, test_size=0.15, random_state=0)


res = applications.InceptionResNetV2(input_shape=(150,150,3), weights='imagenet', include_top=False)
res.trainable = False
print('inceptionRes pre trained model is loaded ....')

model = Sequential([res,
                    Flatten(),
                    Dense(400,activation='tanh'),
                    Dropout(0.5),
                    BatchNormalization(),
                    Dense(6,activation='softmax')
                    ])


early_stopping_callback = keras.callbacks.EarlyStopping(monitor='val_loss',patience=3)

model.compile(loss='categorical_crossentropy',
Example #21
def mySpatialModel(model_name,
                   spatial_size,
                   nb_classes,
                   channels,
                   weights_path=None):

    input_tensor = Input(shape=(channels, spatial_size, spatial_size))
    input_shape = (channels, spatial_size, spatial_size)
    base_model = None
    predictions = None
    data_dim = 1024
    if model_name == 'ResNet50':

        input_tensor = Input(shape=(spatial_size, spatial_size, channels))
        input_shape = (spatial_size, spatial_size, channels)

        base_model = kerasApp.ResNet50(include_top=False,
                                       input_tensor=input_tensor,
                                       input_shape=input_shape,
                                       weights=weights_path,
                                       classes=nb_classes,
                                       pooling=None)
        x = base_model.output
        # add our own fully connected classification layer (method 1):
        #x = Flatten()(x)
        #predictions = Dense(nb_classes, activation='softmax')(x)
        # method 2:
        x = GlobalAveragePooling2D()(x)
        x = Dense(1024, activation='relu')(x)
        predictions = Dense(nb_classes, activation='softmax')(x)
        model = Model(inputs=base_model.input, outputs=predictions)
    elif model_name == 'VGG16':
        input_tensor = Input(shape=(spatial_size, spatial_size, channels))
        input_shape = (spatial_size, spatial_size, channels)
        base_model = kerasApp.VGG16(include_top=False,
                                    input_tensor=input_tensor,
                                    input_shape=input_shape,
                                    weights=weights_path,
                                    classes=nb_classes,
                                    pooling=None)
        x = base_model.output
        x = GlobalAveragePooling2D()(
            x)  # add a global spatial average pooling layer
        x = Dense(1024,
                  activation='relu')(x)  # let's add a fully-connected layer
        predictions = Dense(nb_classes,
                            activation='softmax')(x)  # and a logistic layer
        model = Model(inputs=base_model.input, outputs=predictions)
    elif model_name == 'VGG19':
        base_model = kerasApp.VGG19(include_top=False,
                                    input_tensor=input_tensor,
                                    input_shape=input_shape,
                                    weights=weights_path,
                                    classes=2,
                                    pooling=None)

        x = base_model.output
        # add our own fully connected classification layer
        x = GlobalAveragePooling2D()(x)
        x = Dense(1024, activation='relu')(x)
        predictions = Dense(nb_classes, activation='softmax')(x)
        model = Model(inputs=base_model.input, outputs=predictions)

    elif model_name == 'InceptionV3':
        input_tensor = Input(shape=(spatial_size, spatial_size, channels))
        input_shape = (spatial_size, spatial_size, channels)
        base_model = kerasApp.InceptionV3(weights=weights_path,
                                          include_top=False,
                                          pooling=None,
                                          input_shape=input_shape,
                                          classes=nb_classes)

        x = base_model.output
        # add our own fully connected classification layer
        x = GlobalAveragePooling2D()(x)
        x = Dense(1024, activation='relu')(x)
        predictions = Dense(nb_classes, activation='softmax')(x)
        model = Model(inputs=base_model.input, outputs=predictions)
    elif model_name == 'InceptionResNetV2':
        input_tensor = Input(shape=(spatial_size, spatial_size, channels))
        input_shape = (
            spatial_size,
            spatial_size,
            channels,
        )
        base_model = kerasApp.InceptionResNetV2(weights=weights_path,
                                                include_top=False,
                                                pooling=None,
                                                input_shape=input_shape,
                                                classes=nb_classes)

        x = base_model.output
        # add our own fully connected classification layer
        x = GlobalAveragePooling2D()(x)
        data_dim = 1536
        predictions = Dense(nb_classes, activation='softmax')(x)
        model = Model(inputs=base_model.input, outputs=predictions)
    elif model_name == 'Xception':
        input_shape_xception = (spatial_size, spatial_size, channels)

        base_model = kerasApp.Xception(weights=weights_path,
                                       include_top=False,
                                       pooling="avg",
                                       input_shape=input_shape_xception,
                                       classes=nb_classes)
        x = base_model.output
        predictions = Dense(nb_classes, activation='softmax')(x)
        model = Model(inputs=base_model.input, outputs=predictions)

    elif model_name == 'DenseNet121':
        base_model = kerasApp.DenseNet121(weights=weights_path,
                                          include_top=False,
                                          pooling=None,
                                          input_shape=input_shape,
                                          classes=nb_classes)

        x = base_model.output
        # add our own fully connected classification layer
        x = GlobalAveragePooling2D()(x)

        predictions = Dense(nb_classes, activation='softmax')(x)
        model = Model(inputs=base_model.input, outputs=predictions)
    elif model_name == 'DenseNet169':
        base_model = kerasApp.DenseNet169(weights=weights_path,
                                          include_top=False,
                                          pooling=None,
                                          input_shape=input_shape,
                                          classes=nb_classes)

        x = base_model.output
        # add our own fully connected classification layer
        x = GlobalAveragePooling2D()(x)

        predictions = Dense(nb_classes, activation='softmax')(x)
        model = Model(inputs=base_model.input, outputs=predictions)
    elif model_name == 'DenseNet201':
        base_model = kerasApp.DenseNet201(weights=weights_path,
                                          include_top=False,
                                          pooling=None,
                                          input_shape=input_shape,
                                          classes=nb_classes)

        x = base_model.output
        # add our own fully connected classification layer
        x = GlobalAveragePooling2D()(x)
        predictions = Dense(nb_classes, activation='softmax')(x)
        model = Model(inputs=base_model.input, outputs=predictions)
    elif model_name == 'MobileNet':
        base_model = kerasApp.MobileNet(weights=weights_path,
                                        include_top=False,
                                        pooling=None,
                                        input_shape=input_shape,
                                        classes=nb_classes)
        x = base_model.output
        # add our own fully connected classification layer
        x = GlobalAveragePooling2D()(x)
        x = Dense(1024, activation='relu')(x)
        x = Dense(1024, activation='relu')(x)
        x = Dense(512, activation='relu')(x)
        data_dim = 512
        predictions = Dense(nb_classes, activation='softmax')(x)
        model = Model(inputs=base_model.input, outputs=predictions)
    else:
        print("this model--[" + model_name + "]-- doesnt exist!")

    # freeze all layers of base_model so the bottleneck features can be extracted correctly
    for layer in base_model.layers:
        layer.trainable = True
    # assemble the model for training
    model = Model(inputs=base_model.input, outputs=predictions)

    print('------------- current base_model [' + model_name +
          "] -------------------\n")
    print('number of base_model layers: ' + str(len(base_model.layers)))
    print('number of model layers: ' + str(len(model.layers)))
    featureLayer = model.layers[len(model.layers) - 2]
    print(featureLayer.output_shape)
    print("data_dim:" + str(featureLayer.output_shape[1]))
    print("---------------------------------------------\n")

    #sgd = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)

    # plot the model
    #if plot_model:
    #	plot_model(model, to_file=model_name+'.png', show_shapes=True)
    return model
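
A minimal call sketch for mySpatialModel; the spatial size, channel count, and class count are illustrative assumptions:

# builds an InceptionResNetV2 backbone with a softmax head on top (weights_path=None trains from scratch)
model = mySpatialModel('InceptionResNetV2', spatial_size=299, nb_classes=2,
                       channels=3, weights_path=None)
model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
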
Example #22
def mySpatialModelChannelTest(model_name,spatial_size, nb_classes, channels, channel_first=True, weights_path=None,
				   lr=0.005, decay=1e-6, momentum=0.9,plot_model=True):

	input_tensor = Input(shape=(channels, spatial_size, spatial_size))
	input_shape = (channels, spatial_size, spatial_size)
	base_model=None
	predictions=None
	data_dim=1024

	base_model = kerasApp.ResNet50(include_top=False, input_tensor=input_tensor, input_shape=input_shape,
									   weights=None, classes=nb_classes, pooling=None)
	x = base_model.output
	x = GlobalAveragePooling2D()(x)
	x = Dense(1024, activation='relu')(x)
	predictions = Dense(nb_classes, activation='softmax')(x)
	# build the training model
	model = Model(inputs=base_model.input, outputs=predictions)
	print_shape(model,model_name)


	base_model = kerasApp.VGG16(include_top=False, input_tensor=input_tensor, input_shape=input_shape,
								   weights=None, classes=nb_classes, pooling=None)
	x = base_model.output
	# add our own fully connected classification layer
	x = GlobalAveragePooling2D()(x)  # add a global spatial average pooling layer
	x = Dense(1024, activation='relu')(x)  # let's add a fully-connected layer
	predictions = Dense(nb_classes, activation='softmax')(x)
	# build the training model
	model = Model(inputs=base_model.input, outputs=predictions)
	print_shape(model, model_name)

	base_model = kerasApp.VGG19(include_top=False, input_tensor=input_tensor, input_shape=input_shape,
								weights=None, classes=2, pooling='avg')
	print_shape(base_model, model_name)
	base_model = kerasApp.InceptionV3(weights=None, include_top=False, pooling=None,
							 input_shape=input_shape, classes=nb_classes)
	print_shape(base_model, model_name)
	base_model = kerasApp.InceptionResNetV2(weights=None, include_top=False, pooling=None,
							 input_shape=input_shape, classes=nb_classes)
	x = base_model.output
	# add our own fully connected classification layer
	x = GlobalAveragePooling2D()(x)
	predictions = Dense(nb_classes, activation='softmax')(x)
	# build the training model
	model = Model(inputs=base_model.input, outputs=predictions)
	print_shape(model, model_name)
	#channel last
	input_tensor_Xception = Input(shape=( spatial_size, spatial_size,channels))
	input_shape__Xception = (spatial_size, spatial_size,channels)
	base_model = kerasApp.Xception(weights=None, include_top=False, pooling=None,
											input_shape=input_shape__Xception, classes=nb_classes)
	print_shape(base_model, model_name)

	base_model = kerasApp.DenseNet121(weights=None, include_top=False, pooling=None,
											input_shape=input_shape, classes=nb_classes)
	print_shape(base_model, model_name)

	base_model = kerasApp.DenseNet169(weights=None, include_top=False, pooling=None,
											input_shape=input_shape, classes=nb_classes)

	print_shape(base_model, model_name)

	base_model = kerasApp.DenseNet201(weights=None, include_top=False, pooling=None,
											input_shape=input_shape, classes=nb_classes)

	print_shape(base_model, model_name)
	input_shape = (channels, spatial_size, spatial_size)

	base_model = kerasApp.MobileNet(weights=None, include_top=False, pooling=None,
												  input_shape=input_shape, classes=nb_classes)
Example #23
# import keras
import numpy as np
import plaidml.keras
plaidml.keras.install_backend()
import plaidml.keras.backend as K
import keras.applications as kapp
from keras.datasets import cifar10

(x_train, y_train_cats), (x_test, y_test_cats) = cifar10.load_data()
batch_size = 8
x_train = x_train[:batch_size]
# x_train = np.repeat(np.repeat(x_train, 7, axis=1), 7, axis=2)
x_train = np.resize(x_train, (x_train.shape[0], 299, 299, x_train.shape[-1]))

K.IVPL_DEVICE_NO = 0
model_1 = kapp.InceptionResNetV2()
model_1.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])

print("Running initial batch (compiling tile program)")
model_1._function_kwargs['dev_no'] = 0
model_1.predict(x=x_train, batch_size=batch_size)

K.IVPL_DEVICE_NO = 1
model_2 = kapp.InceptionResNetV2()
model_2.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])

print("Running initial batch (compiling tile program)")
model_2._function_kwargs['dev_no'] = 1
model_2.predict(x=x_train, batch_size=batch_size)

def Predict(model, x, size):
Example #24
    beta = 2
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)), axis=1)
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)), axis=1)
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)), axis=1)

    precision = true_positives / (predicted_positives + K.epsilon())
    recall = true_positives / (possible_positives + K.epsilon())

    return K.mean(((1 + beta**2) * precision * recall) /
                  ((beta**2) * precision + recall + K.epsilon()))
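
For reference, the value returned above is the F-beta score with beta = 2, F_beta = (1 + beta^2) * precision * recall / (beta^2 * precision + recall), computed per sample and averaged over the batch; beta = 2 weights recall more heavily than precision.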


# In[9]:

model = applications.InceptionResNetV2(weights=None,
                                       include_top=False,
                                       input_shape=(img_size, img_size, 3))
model.load_weights(
    '../input/inceptionresnetv2/inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5'
)

# In[ ]:

model.trainable = False

# In[ ]:

# Freeze some layers
# for layer in model.layers[:-4]:
#     layer.trainable = False
Example #25
def test_inceptionresnetv2_pooling():
    model = applications.InceptionResNetV2(weights=None, include_top=False, pooling='avg')
    assert model.output_shape == (None, 1536)
Example #26
if args.base_model == 'vgg16':  # assumed opening branch; the start of this snippet is truncated in the source
    base_model = applications.VGG16(weights='imagenet',
                                    include_top=False,
                                    input_shape=(256, 256, 3))
elif args.base_model == 'vgg19':
    base_model = applications.VGG19(weights='imagenet',
                                    include_top=False,
                                    input_shape=(256, 256, 3))
elif args.base_model == 'resnet50':
    base_model = applications.ResNet50(weights='imagenet',
                                       include_top=False,
                                       input_shape=(256, 256, 3))
elif args.base_model == 'inceptionv3':
    base_model = applications.InceptionV3(weights='imagenet',
                                          include_top=False,
                                          input_shape=(256, 256, 3))
elif args.base_model == 'inception_resnetv2':
    base_model = applications.InceptionResNetV2(weights='imagenet',
                                                include_top=False,
                                                input_shape=(256, 256, 3))
elif args.base_model == 'xception':
    base_model = applications.Xception(weights='imagenet',
                                       include_top=False,
                                       input_shape=(256, 256, 3))
elif args.base_model == 'densenet121':
    base_model = applications.DenseNet121(weights='imagenet',
                                          include_top=False,
                                          input_shape=(256, 256, 3))
elif args.base_model == 'densenet169':
    base_model = applications.DenseNet169(weights='imagenet',
                                          include_top=False,
                                          input_shape=(256, 256, 3))
elif args.base_model == 'densenet201':
    base_model = applications.DenseNet201(weights='imagenet',
Example #27
 def model_fn():
     return applications.InceptionResNetV2(weights=None)
Example #28
    val_gen = ImageDataGenerator(rescale=1. / 255, featurewise_std_normalization=True)
    val_generator = val_gen.flow_from_directory(test_dir, target_size=(img_height, img_width),
                                                batch_size=batch_size, class_mode="categorical")
    y_p, y_t = eval_model(model1, model2, val_generator, num_test_samples)
    acc = np.round(accuracy_score(y_p, y_t), 3) + alpha
    f1 = np.round(f1_score(y_p, y_t, average='macro'), 3) + alpha
    pre = np.round(precision_score(y_p, y_t, average='macro'), 3) + alpha
    re = np.round(recall_score(y_p, y_t, average='macro'), 3)
    if skip_training == False:
        train_datagen = ImageDataGenerator(rescale=1. / 255, featurewise_std_normalization=True)
        val_datagen = ImageDataGenerator(rescale=1. / 255, featurewise_std_normalization=True)
        train_generator = train_datagen.flow_from_directory(train_dir, target_size=(img_height, img_width),
                                                             batch_size=batch_size, class_mode="categorical")
        val_generator = val_datagen.flow_from_directory(val_dir, target_size=(img_height, img_width),
                                                        batch_size=batch_size, class_mode="categorical")

        model1 = applications.InceptionResNetV2(weights="imagenet", include_top=False,
                                                input_shape=(img_width, img_height, 3))
        x = model1.output
        x = Dropout(0.5)(x)
        x = Flatten()(x)
        output = Dense(num_classes, activation="softmax")(x)
        model1 = Model(inputs=model1.input, outputs=output)

        model1.compile(loss="categorical_crossentropy", optimizer='adam', metrics=["accuracy"])
        checkpoint = ModelCheckpoint("inceptionresnet.h5", monitor='val_acc', verbose=1, save_best_only=True,
                                     save_weights_only=False, mode='auto', period=1)
        reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.2, patience=3, min_lr=0.0001, mode='auto')
        callbacks = [checkpoint, reduce_lr]
        history = model1.fit_generator(
            train_generator, samples_per_epoch=num_train_samples, epochs=epochs, validation_data=val_generator,
            validation_steps=math.ceil(num_val_samples // (batch_size)), callbacks=callbacks, verbose=1)
        log.append(history)
Example #29
 def model_fn(input_shape):
     return applications.InceptionResNetV2(weights=None,
                                           include_top=False,
                                           input_shape=input_shape)
Example #30
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 1.0
config.gpu_options.visible_device_list = "1"
set_session(tf.Session(config=config))

from keras import backend as K
K.set_image_dim_ordering('th')  # a lot of old CNN examples use Theano-style ('th') channel ordering

from keras.models import Sequential, model_from_json, Model, load_model
from keras.layers import Dense, Activation, Flatten, Dropout, Convolution2D, MaxPooling2D
from keras import applications
from keras import optimizers

base_model = applications.InceptionResNetV2(include_top=False,
                                            input_shape=(1, img_rows,
                                                         img_cols))

add_model = Sequential()
add_model.add(Flatten(input_shape=base_model.output_shape[1:]))
add_model.add(Dense(128, activation='relu'))
add_model.add(Dropout(0.5))
add_model.add(Dense(128, activation='relu'))
add_model.add(Dropout(0.5))
add_model.add(Dense(nb_classes, activation='softmax'))

model = Model(inputs=base_model.input, outputs=add_model(base_model.output))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])