Example 1
def simpleCNN(IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS):
    """First version of simple CNN
    """
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, BatchNormalization

    model = Sequential()

    model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(
        IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS)))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    # 2 because we have cat and dog classes
    model.add(Dense(2, activation='softmax'))

    return model
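
Unlike simpleCNN_v2 in Example 4 below, this function returns an uncompiled model, so the caller compiles it. A minimal usage sketch; the 128x128 RGB input size is an assumption for illustration:

model = simpleCNN(IMAGE_WIDTH=128, IMAGE_HEIGHT=128, IMAGE_CHANNELS=3)
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop', metrics=['accuracy'])
model.summary()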
Example 2
    def _create_model_2(self, input_tensor, input_shape, num_classes,
                        dropout_rate):
        #input_tensor = Input(shape=input_shape)

        model = Sequential()
        model.add(
            Conv2D(32,
                   kernel_size=(5, 5),
                   padding='same',
                   activation='relu',
                   input_shape=input_shape[1:]))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

        model.add(
            Conv2D(64, kernel_size=(3, 3), padding='same', activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

        model.add(Flatten())

        model.add(Dense(1024, activation='relu'))
        model.add(Dropout(dropout_rate))

        if num_classes == 1:
            model.add(Dense(1, activation='sigmoid'))
            #model.add(Dense(1, activation='sigmoid', activity_regularizer=regularizers.l2(0.0001)))
        elif num_classes >= 2:
            model.add(Dense(num_classes, activation='softmax'))
            #model.add(Dense(num_classes, activation='softmax', activity_regularizer=regularizers.l2(0.0001)))
        else:
            assert num_classes > 0, 'Invalid number of classes.'

        # Display the model summary.
        #model.summary()

        return model(input_tensor)
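
Because the method returns the Sequential model applied to input_tensor (an output tensor rather than a Model), the caller wraps the result itself. A minimal sketch; the instance name builder, the input size, and the class count are assumptions:

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

input_shape = (None, 28, 28, 1)  # batch-first shape, matching the input_shape[1:] slice above
input_tensor = Input(shape=input_shape[1:])
outputs = builder._create_model_2(input_tensor, input_shape,
                                  num_classes=10, dropout_rate=0.5)
model = Model(inputs=input_tensor, outputs=outputs)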
Example 3
    def _create_model_1(self, input_tensor, num_classes, dropout_rate):
        x = Conv2D(32, kernel_size=(5, 5), padding='same',
                   activation='relu')(input_tensor)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)

        x = Conv2D(64, kernel_size=(3, 3), padding='same',
                   activation='relu')(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)

        x = Flatten()(x)

        x = Dense(1024, activation='relu')(x)
        x = Dropout(dropout_rate)(x)

        if num_classes == 1:
            x = Dense(1, activation='sigmoid')(x)
            #x = Dense(1, activation='sigmoid', activity_regularizer=regularizers.l2(0.0001))(x)
        elif num_classes >= 2:
            x = Dense(num_classes, activation='softmax')(x)
            #x = Dense(num_classes, activation='softmax', activity_regularizer=regularizers.l2(0.0001))(x)
        else:
            assert num_classes > 0, 'Invalid number of classes.'

        #model = Model(inputs=input_tensor, outputs=x)

        return x
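
As the commented-out Model line suggests, the caller wraps the returned tensor itself. A minimal sketch; the instance name, input size, and class count are assumptions:

input_tensor = Input(shape=(28, 28, 1))  # input size assumed
x = builder._create_model_1(input_tensor, num_classes=10, dropout_rate=0.5)
model = Model(inputs=input_tensor, outputs=x)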
Example 4
def simpleCNN_v2(IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS):
    """Second version of simple CNN,
    added with 2 CNN layers and 1 FC layer.
    """
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, BatchNormalization

    model = Sequential()

    model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(
        IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS)))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(256, (3, 3), activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(1024, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))

    model.add(Dense(1024, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    # 2 because we have cat and dog classes
    model.add(Dense(2, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop', metrics=['accuracy'])

    return model
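
This version compiles the model before returning it, so it can be trained directly. A minimal sketch; the input size and the data generators are assumptions:

model = simpleCNN_v2(IMAGE_WIDTH=128, IMAGE_HEIGHT=128, IMAGE_CHANNELS=3)
model.summary()
#model.fit(train_generator, epochs=10, validation_data=val_generator)  # data pipeline assumed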
Example 5
    def feature_extractor_network(self):
        # input; in_shape is assumed to be an attribute of the enclosing class
        in_image = Input(shape=self.in_shape)
        # C1 layer
        nett = Conv2D(32, (5, 5))(in_image)
        nett = BatchNormalization()(nett)
        nett = LeakyReLU(alpha=0.2)(nett)
        # M2 layer
        nett = MaxPooling2D(pool_size=(3, 3))(nett)
        # C3 layer: the original never applied this layer and passed
        # pool_size to BatchNormalization; both are fixed here
        nett = Conv2D(64, (3, 3))(nett)
        nett = BatchNormalization()(nett)
        nett = LeakyReLU(alpha=0.2)(nett)
        # L4 layer
        nett = LocallyConnected2D(128, (3, 3))(nett)
        # L5 layer
        nett = LocallyConnected2D(256, (3, 3))(nett)
        # F6 layer; flatten so the dense layers yield a flat feature vector
        nett = Flatten()(nett)
        nett = Dense(512, activation='relu')(nett)
        nett = Dropout(0.2)(nett)
        # F7 layer: the original omitted the unit count; 128 is an assumed value
        out_features = Dense(128, activation='tanh')(nett)
        # output
        model = Model(inputs=in_image, outputs=out_features)
        return model
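
The method relies on layer imports that are not shown; a sketch of what the module presumably imports (note that LocallyConnected2D is only available in older tf.keras releases, as it was removed in Keras 3):

from tensorflow.keras.layers import (Input, Conv2D, BatchNormalization, LeakyReLU,
                                     MaxPooling2D, LocallyConnected2D, Flatten,
                                     Dense, Dropout)
from tensorflow.keras.models import Model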
Example 6
def zero_padding_block(input_tensor,
                       filters,
                       stage,
                       block,
                       se_enabled=False,
                       se_ratio=16):
    numFilters1, numFilters2 = filters

    if K.image_data_format() == 'channels_last':
        bn_axis = -1
    else:
        bn_axis = 1

    conv_name_base = 'res' + str(stage) + '_' + str(block) + '_branch'
    bn_name_base = 'bn' + str(stage) + '_' + str(block) + '_branch'

    # downsampling directly by convolution with stride 2
    x = Conv2D(numFilters1, (3, 3),
               strides=(2, 2),
               kernel_initializer='he_normal',
               name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(numFilters2, (3, 3),
               kernel_initializer='he_normal',
               name=conv_name_base + '2b')(x)
    # squeeze and excitation block
    if se_enabled:
        x = squeeze_excitation_block(x, ratio=se_ratio)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)

    # zero padding and downsampling with 1x1 conv shortcut connection
    x_shortcut = Conv2D(1, (1, 1),
                        strides=(2, 2),
                        kernel_initializer='he_normal',
                        name=conv_name_base + '1')(input_tensor)
    # Note: x_shortcut2 is computed here but never used below.
    x_shortcut2 = MaxPooling2D(pool_size=(1, 1),
                               strides=(2, 2),
                               padding='same')(input_tensor)
    x_shortcut = Lambda(zeropad, output_shape=zeropad_output_shape)(x_shortcut)

    x_shortcut = BatchNormalization(axis=bn_axis,
                                    name=bn_name_base + '1')(x_shortcut)

    # addition of shortcut
    x = Add()([x, x_shortcut])
    x = Activation('relu')(x)

    return x
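
The zeropad and zeropad_output_shape helpers are referenced but not defined here. A minimal sketch of one common formulation, assuming channels_last and zero-padding the channel axis to double the channel count:

import tensorflow.keras.backend as K

def zeropad(x):
    # Append all-zero channels after the existing ones (channels_last assumed).
    zeros = K.zeros_like(x)
    return K.concatenate([x, zeros], axis=-1)

def zeropad_output_shape(input_shape):
    shape = list(input_shape)
    shape[-1] *= 2  # channel axis doubles after padding
    return tuple(shape)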
Example 7
            kernel_initializer='he_normal',
            bias_initializer='zeros')(inputs)
a1 = BatchNormalization()(a1)
a1 = ReLU()(a1)

a2 = Conv2D(24,
            3,
            1,
            "same",
            kernel_initializer='he_normal',
            bias_initializer='zeros')(a1)
a2 = BatchNormalization()(a2)
a2 = ReLU()(a2)
#a2 = Merge  # dangling statement: Merge was removed from Keras; the intended merge (e.g. Add) is unclear
#---------------------------------------------
b1 = MaxPooling2D((2, 2), padding='valid')(a2)

b1 = Conv2D(48,
            3,
            1,
            "same",
            kernel_initializer='he_normal',
            bias_initializer='zeros')(b1)
b1 = BatchNormalization()(b1)
b1 = ReLU()(b1)

b2 = Conv2D(48,
            3,
            1,
            "same",
            kernel_initializer='he_normal',