예제 #1
0
    def model(self):
        """Build a U-Net style encoder/decoder generator.

        Encoder: seven stride-2 4x4 convolutions (LeakyReLU), each but the
        first followed by batch normalization.  Decoder: six blocks of
        2x upsampling + 4x4 convolution (ReLU) + batch normalization, each
        concatenated with the mirrored encoder feature map, followed by a
        final upsample and a tanh convolution with ``self.C`` channels.

        Returns:
            keras ``Model`` mapping an input of shape ``self.SHAPE`` to a
            tanh-activated image with ``self.C`` channels.
        """
        def down(x, filters, batch_norm=True):
            # 4x4 stride-2 conv with LeakyReLU; halves the spatial size.
            y = Convolution2D(filters,
                              kernel_size=4,
                              strides=2,
                              padding='same',
                              activation=LeakyReLU(alpha=0.2))(x)
            return BatchNormalization()(y) if batch_norm else y

        def up(x, skip, filters):
            # 2x upsample, 4x4 conv + BN, then concat with the encoder skip.
            y = UpSampling2D(size=2)(x)
            y = Convolution2D(filters,
                              kernel_size=4,
                              strides=1,
                              padding='same',
                              activation='relu')(y)
            y = BatchNormalization(momentum=0.8)(y)
            return Concatenate()([y, skip])

        input_layer = Input(shape=self.SHAPE)

        # Encoder path (first block has no batch norm, as in the original).
        d1 = down(input_layer, 64, batch_norm=False)
        d2 = down(d1, 64 * 2)
        d3 = down(d2, 64 * 4)
        d4 = down(d3, 64 * 8)
        d5 = down(d4, 64 * 8)
        d6 = down(d5, 64 * 8)
        d7 = down(d6, 64 * 8)

        # Decoder path with mirrored skip connections.
        u1 = up(d7, d6, 64 * 8)
        u2 = up(u1, d5, 64 * 8)
        u3 = up(u2, d4, 64 * 8)
        u4 = up(u3, d3, 64 * 4)
        u5 = up(u4, d2, 64 * 2)
        u6 = up(u5, d1, 64)

        last_upsample = UpSampling2D(size=2)(u6)
        output_layer = Convolution2D(self.C,
                                     kernel_size=4,
                                     strides=1,
                                     padding='same',
                                     activation='tanh')(last_upsample)

        return Model(input_layer, output_layer)
예제 #2
0
def load_model_cnn(input_dim,
                   data_set_size,
                   l2_reg=0.01,
                   activation='relu',
                   hidden_layer_size=64,
                   hidden_layer_num=3,
                   dropout_rate=0.25):
    """Build a convolutional autoencoder-style Sequential model (Keras 1 API).

    Structure: two conv stacks each pooled 1x8, a dense bottleneck
    (1280 -> hidden_layer_size -> 1280), then a reshape back to a feature
    map and two upsample+conv stacks mirroring the encoder, ending in a
    linear activation.

    Args:
        input_dim: width of the 1x1xN input signal.
        data_set_size: unused (kept for interface compatibility).
        l2_reg: L2 weight-regularization factor on the dense layers.
        activation: activation name used throughout the network.
        hidden_layer_size: base number of convolution filters.
        hidden_layer_num: unused (kept for interface compatibility).
        dropout_rate: dropout after each pooled conv stack; halved for
            the dense bottleneck layers.

    Returns:
        The uncompiled keras ``Sequential`` model.
    """
    print('Building CNN-Model.')

    model = Sequential()

    print('SIZEEEEE:  ', input_dim)

    h = hidden_layer_size

    # --- Encoder stack 1: h filters, pooled 1x8 ---
    model.add(Convolution2D(h, 1, 3, border_mode='same',
                            input_shape=(1, 1, input_dim)))
    model.add(Activation(activation))
    model.add(Convolution2D(h, 1, 3, border_mode='same'))
    model.add(Activation(activation))
    model.add(MaxPooling2D(pool_size=(1, 8), border_mode='valid'))
    model.add(Dropout(dropout_rate))

    # --- Encoder stack 2: 2h filters, pooled 1x8 ---
    model.add(Convolution2D(h * 2, 1, 3, border_mode='same'))
    model.add(Activation(activation))
    model.add(Convolution2D(h * 2, 1, 3, border_mode='same'))
    model.add(Activation(activation))
    model.add(MaxPooling2D(pool_size=(1, 8), border_mode='valid'))
    model.add(Dropout(dropout_rate))

    model.add(Flatten())

    # --- Dense bottleneck: 1280 -> h -> 1280, each BN + act + dropout ---
    for units in (1280, h, 1280):
        model.add(Dense(units, W_regularizer=l2(l2_reg), init='lecun_uniform'))
        model.add(BatchNormalization(epsilon=0.001, mode=0))
        model.add(Activation(activation))
        model.add(Dropout(dropout_rate / 2))

    # Restore a (channels, 1, width) feature map; width is input_dim / 64
    # after the two 1x8 poolings.
    model.add(Reshape((h * 2, 1, int(int(input_dim / 8) / 8))))

    # --- Decoder stack 1: upsample 1x8, 2h filters ---
    model.add(UpSampling2D((1, 8)))
    model.add(Convolution2D(h * 2, 1, 3, border_mode='same'))
    model.add(Activation(activation))
    model.add(Convolution2D(h * 2, 1, 3, border_mode='same'))
    model.add(Activation(activation))

    # --- Decoder stack 2: upsample 1x8, h filters, then 1-channel output ---
    model.add(UpSampling2D((1, 8)))
    model.add(Convolution2D(h, 1, 3, border_mode='same'))
    model.add(Activation(activation))
    model.add(Convolution2D(h, 1, 3, border_mode='same'))
    model.add(Activation(activation))
    model.add(Convolution2D(1, 1, 3, border_mode='same'))
    model.add(Activation(activation))

    model.add(Activation("linear"))

    model.summary()

    return model
예제 #3
0
def generator_upsampling(cat_dim, cont_dim, noise_dim, img_dim, model_name="generator_upsampling", dset="mnist"):
    """InfoGAN-style generator using UpSampling2D + Conv2D blocks.

    Args:
        cat_dim: shape tuple of the categorical latent input.
        cont_dim: shape tuple of the continuous latent input.
        noise_dim: shape tuple of the noise input.
        img_dim: image dimensions; channel position depends on the
            backend's image dim ordering.
        model_name: name of the returned Model.
        dset: "mnist" (2 upsampling stages), "celebA" (4 stages), or
            anything else (stages chosen so the start size is <= 7).

    Returns:
        keras Model mapping [cat, cont, noise] inputs to a tanh image.
    """
    side = img_dim[1]
    base_filters = 128

    # Pick the number of 2x upsampling stages and the starting spatial size.
    if dset == "mnist":
        nb_upconv, start_dim = 2, int(side / 4)
    elif dset == "celebA":
        nb_upconv, start_dim = 4, int(side / 16)
    else:
        # Generic: halve the side until it fits in 7 pixels.
        o, nb_upconv = side, 0
        while o > 7:
            o = o / 2
            nb_upconv += 1
        start_dim = int(o)

    if K.image_dim_ordering() == "th":
        bn_axis = 1
        reshape_shape = (base_filters, start_dim, start_dim)
        output_channels = img_dim[0]
    else:
        bn_axis = -1
        reshape_shape = (start_dim, start_dim, base_filters)
        output_channels = img_dim[-1]

    cat_input = Input(shape=cat_dim, name="cat_input")
    cont_input = Input(shape=cont_dim, name="cont_input")
    noise_input = Input(shape=noise_dim, name="noise_input")

    # Fuse the three latent inputs into a single vector.
    gen_input = Concatenate()([noise_input, cont_input, cat_input])

    x = Activation("relu")(BatchNormalization()(Dense(1024)(gen_input)))
    x = Activation("relu")(BatchNormalization()(
        Dense(base_filters * start_dim * start_dim)(x)))
    x = Reshape(reshape_shape)(x)

    # Upscaling blocks: filters halve at each stage.
    for i in range(nb_upconv):
        x = UpSampling2D(size=(2, 2))(x)
        filters = int(base_filters / (2 ** (i + 1)))
        x = Conv2D(filters, (3, 3), padding="same")(x)
        x = BatchNormalization(axis=bn_axis)(x)
        x = Activation("relu")(x)

    x = Conv2D(output_channels, (3, 3), name="gen_Conv2D_final", padding="same", activation='tanh')(x)

    return Model(inputs=[cat_input, cont_input, noise_input], outputs=[x], name=model_name)
예제 #4
0
def create_segnet(args, indices=True, ker_init="he_normal") -> tModel:
    """Build a SegNet segmentation model on top of a VGG16 encoder.

    The decoder mirrors the encoder by walking the encoder's layer list in
    reverse: each block unpools (DePool2D with pooling indices, or plain
    UpSampling2D) and then re-applies Conv2D layers whose filters/kernel/
    padding are copied from the corresponding encoder layers.

    Args:
        args: object with ``input_shape`` (iterable of ints) and
            ``num_classes`` (int) attributes.
        indices: if True, unpool with DePool2D using the matching encoder
            pooling layer; otherwise use UpSampling2D.
        ker_init: kernel initializer for all decoder convolutions.

    Returns:
        tModel producing per-pixel sigmoid (1 class) or softmax (>1 class)
        predictions.
    """

    input_shape = tuple(args.input_shape)
    encoder = VGG16_encoder(input_shape, init=True)

    # Reverse the encoder's layers so L[0] is its last layer; decoder blocks
    # index into this list to mirror pool sizes and conv configurations.
    L = [layer for i, layer in enumerate(encoder.layers)]  # type: List[Layer]
    #for layer in L: layer.trainable = False # freeze VGG16
    L.reverse()

    x = encoder.output
    x = Dropout(0.5)(x)
    # Block 5: unpool L[0], then three convs mirroring L[1..3]
    if indices:
        x = DePool2D(L[0],
                     size=L[0].pool_size,
                     input_shape=encoder.output_shape[1:])(x)
    else:
        x = UpSampling2D(size=L[0].pool_size,
                         input_shape=encoder.output_shape[1:])(x)
    x = Activation('relu')(BatchNormalization()(Conv2D(
        L[1].filters,
        L[1].kernel_size,
        padding=L[1].padding,
        kernel_initializer=ker_init)(x)))
    x = Activation('relu')(BatchNormalization()(Conv2D(
        L[2].filters,
        L[2].kernel_size,
        padding=L[2].padding,
        kernel_initializer=ker_init)(x)))
    x = Activation('relu')(BatchNormalization()(Conv2D(
        L[3].filters,
        L[3].kernel_size,
        padding=L[3].padding,
        kernel_initializer=ker_init)(x)))
    x = Dropout(0.5)(x)
    # Block 4: unpool L[4], convs mirroring L[5..7]
    if indices: x = DePool2D(L[4], size=L[4].pool_size)(x)
    else: x = UpSampling2D(size=L[4].pool_size)(x)
    x = Activation('relu')(BatchNormalization()(Conv2D(
        L[5].filters,
        L[5].kernel_size,
        padding=L[5].padding,
        kernel_initializer=ker_init)(x)))
    x = Activation('relu')(BatchNormalization()(Conv2D(
        L[6].filters,
        L[6].kernel_size,
        padding=L[6].padding,
        kernel_initializer=ker_init)(x)))
    x = Activation('relu')(BatchNormalization()(Conv2D(
        L[7].filters,
        L[7].kernel_size,
        padding=L[7].padding,
        kernel_initializer=ker_init)(x)))
    x = Dropout(0.5)(x)
    # Block 3: unpool L[8], then convs mirroring L[10..11].
    # NOTE(review): L[9] is skipped here — presumably intentional (the
    # commented-out ZeroPadding2D suggests a shape adjustment was once
    # needed at this point) — confirm against the encoder's layer layout.
    if indices: x = DePool2D(L[8], size=L[8].pool_size)(x)
    else: x = UpSampling2D(size=L[8].pool_size)(x)
    # x = ZeroPadding2D(padding=(0, 1))(x)
    x = Activation('relu')(BatchNormalization()(Conv2D(
        L[10].filters,
        L[10].kernel_size,
        padding=L[10].padding,
        kernel_initializer=ker_init)(x)))
    x = Activation('relu')(BatchNormalization()(Conv2D(
        L[11].filters,
        L[11].kernel_size,
        padding=L[11].padding,
        kernel_initializer=ker_init)(x)))
    x = Dropout(0.5)(x)
    # Block 2: unpool L[12], convs mirroring L[13..14]
    if indices: x = DePool2D(L[12], size=L[12].pool_size)(x)
    else: x = UpSampling2D(size=L[12].pool_size)(x)
    x = Activation('relu')(BatchNormalization()(Conv2D(
        L[13].filters,
        L[13].kernel_size,
        padding=L[13].padding,
        kernel_initializer=ker_init)(x)))
    x = Activation('relu')(BatchNormalization()(Conv2D(
        L[14].filters,
        L[14].kernel_size,
        padding=L[14].padding,
        kernel_initializer=ker_init)(x)))
    # Block 1: unpool L[15], convs mirroring L[16..17]
    if indices: x = DePool2D(L[15], size=L[15].pool_size)(x)
    else: x = UpSampling2D(size=L[15].pool_size)(x)
    x = Activation('relu')(BatchNormalization()(Conv2D(
        L[16].filters,
        L[16].kernel_size,
        padding=L[16].padding,
        kernel_initializer=ker_init)(x)))
    x = Activation('relu')(BatchNormalization()(Conv2D(
        L[17].filters,
        L[17].kernel_size,
        padding=L[17].padding,
        kernel_initializer=ker_init)(x)))

    # 1x1 convolution produces the per-pixel class scores.
    x = Conv2D(args.num_classes, (1, 1),
               padding='valid',
               kernel_initializer=ker_init)(x)

    if args.num_classes == 1:
        x = Activation('sigmoid')(x)
    elif args.num_classes > 1:
        x = Activation('softmax')(x)

    predictions = x

    segnet = Model(inputs=encoder.inputs, outputs=predictions)  # type: tModel

    return segnet
def segnet(input_shape=(512, 512, 3), classes=5, kernel=3):
    """SegNet-style encoder/decoder for semantic segmentation.

    Encoder: five VGG-like blocks of Conv2D+BN+ReLU ending in 2x2 max
    pooling; decoder: five mirrored blocks of 2x upsampling followed by
    Conv2D+BN+ReLU, then a 1x1 classification convolution, a reshape to
    (H*W, classes) and a softmax.

    Args:
        input_shape: (height, width, channels) of the input image.
        classes: number of output classes.
        kernel: convolution kernel size.  BUG FIX: ``kernel`` was
            previously an undefined free variable (NameError at call
            time); it is now a parameter defaulting to the conventional 3.

    Returns:
        keras Model mapping images to per-pixel softmax class scores.
    """
    def conv_bn_relu(tensor, filters, size):
        # Conv2D -> BatchNorm -> ReLU building block.
        t = Convolution2D(filters, (size, size), padding="same")(tensor)
        t = BatchNormalization()(t)
        return Activation("relu")(t)

    img_input = Input(shape=input_shape)
    X = img_input

    # Encoding layers: each block ends in a 2x2 max-pooling.
    for block_filters in ([64, 64], [128, 128], [256, 256, 256],
                          [512, 512, 512], [512, 512, 512]):
        for f in block_filters:
            X = conv_bn_relu(X, f, kernel)
        X = MaxPooling2D()(X)

    # Decoding layers: each block starts with a 2x upsampling.
    for block_filters in ([512, 512, 512], [512, 512, 256], [256, 256, 128],
                          [128, 64], [64]):
        X = UpSampling2D()(X)
        for f in block_filters:
            X = conv_bn_relu(X, f, kernel)

    # Per-pixel class scores, flattened for the softmax over classes.
    X = Convolution2D(classes, (1, 1), padding="valid")(X)
    X = BatchNormalization()(X)
    X = Reshape((input_shape[0] * input_shape[1], classes),
                input_shape=input_shape)(X)
    X = Permute((1, 2))(X)  # identity permutation; kept for compatibility
    X = Activation("softmax")(X)
    model = Model(img_input, X)
    return model
예제 #6
0
def create_up_sampling_module(input_layer, n_filters, size=(2, 2)):
    """Upsample ``input_layer`` by ``size``, then apply a convolution block.

    Returns the result of ``create_convolution_block`` with ``n_filters``
    filters applied to the upsampled tensor.
    """
    upsampled = UpSampling2D(size=size)(input_layer)
    return create_convolution_block(upsampled, n_filters)
예제 #7
0
def VGGSegNet(nClasses, shape, W, lr):
    """SegNet with a VGG16-style encoder for single-channel images.

    The decoder starts from the block-4 pooled feature map and applies
    alternating upsampling and conv+BN blocks back to (roughly) the input
    resolution, then a final classification convolution.

    Args:
        nClasses: number of output classes (1 -> sigmoid + binary
            cross-entropy, >1 -> softmax + categorical cross-entropy).
        shape: (height, width) of the single-channel input.
        W: path to a weights file to load, or '' to skip loading.
        lr: learning rate for the Adam optimizer.
            BUG FIX: previously this argument was ignored and the
            learning rate was hard-coded to 1e-4.

    Returns:
        Compiled keras Model with ``outputWidth`` / ``outputHeight``
        attributes attached.
    """
    vgg_level = 3  # decoder starts from levels[3] == block-4 pooled features
    input_height = shape[0]
    input_width = shape[1]
    img_input = Input(shape=(input_height, input_width, 1))

    # ----- VGG16-style encoder -----
    # Block 1
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
    f1 = x
    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
    f2 = x
    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
    f3 = x
    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
    f4 = x
    # Block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
    f5 = x

    # NOTE: the original also built a dense classifier head here
    # (flatten/fc1/fc2/predictions) that was never connected to the
    # returned model; that dead graph construction has been removed.

    levels = [f1, f2, f3, f4, f5]
    o = levels[vgg_level]

    # ----- Decoder: conv+BN, then alternating upsample / conv+BN blocks -----
    o = ZeroPadding2D((1, 1))(o)
    o = Conv2D(512, (3, 3), padding='valid')(o)
    o = BatchNormalization()(o)

    for filters in (256, 128, 64, 64):
        o = UpSampling2D((2, 2))(o)
        o = ZeroPadding2D((1, 1))(o)
        o = Conv2D(filters, (3, 3), padding='valid')(o)
        o = BatchNormalization()(o)

    o = Conv2D(nClasses, (3, 3), padding='same')(o)

    # Probe the output spatial size via a throwaway Model.
    o_shape = Model(img_input, o).output_shape
    outputHeight = o_shape[1]
    outputWidth = o_shape[2]

    if nClasses > 1:
        o = (Reshape((-1, outputHeight * outputWidth)))(o)
        o = (Permute((2, 1)))(o)
        o = (Activation('softmax'))(o)
    if nClasses == 1:
        o = (Activation('sigmoid'))(o)
    model = Model(img_input, o)
    model.outputWidth = outputWidth
    model.outputHeight = outputHeight
    if W != '':
        model.load_weights(W)
    if nClasses > 1:
        model.compile(loss="categorical_crossentropy", optimizer=Adam(lr=lr), metrics=['accuracy'])
    if nClasses == 1:
        model.compile(loss="binary_crossentropy", optimizer=Adam(lr=lr), metrics=['accuracy'])
    model.summary()

    return model
def get_unet():
    """Build and compile a 4-level U-Net for 192x192 RGB inputs.

    The encoder (and bottleneck) is placed on GPU:0, the decoder and the
    compile step on GPU:1.  Every layer keeps an explicit name of the form
    convN_M / BNN_M / ActivationN_M so saved weights remain loadable.

    Returns:
        A compiled keras Model (binary cross-entropy, SGD with Nesterov
        momentum, dice/jaccard/accuracy metrics) mapping (192, 192, 3)
        images to a single-channel sigmoid mask.
    """

    def _enc_block(tensor, filters, level):
        # Two Conv(3x3) -> BatchNorm -> ReLU stages at one encoder level.
        for stage in (1, 2):
            tensor = Conv2D(filters, (3, 3),
                            padding='same',
                            kernel_initializer='he_normal',
                            name='conv%d_%d' % (level, stage))(tensor)
            tensor = BatchNormalization(name='BN%d_%d' % (level, stage))(tensor)
            tensor = Activation('relu',
                                name='Activation%d_%d' % (level, stage))(tensor)
        return tensor

    def _dec_block(tensor, skip, up_filters, filters_a, filters_b, level):
        # Upsample, 3x3 conv, concatenate with the encoder skip tensor,
        # then two Conv -> BatchNorm -> ReLU stages.
        stream = UpSampling2D(size=(2, 2), name='Up%d' % (level - 5))(tensor)
        stream = Conv2D(up_filters, (3, 3),
                        activation='relu',
                        padding='same',
                        kernel_initializer='he_normal',
                        name='conv%d_1' % level)(stream)
        stream = keras.layers.Concatenate(
            name='concat%d' % (level - 5))([skip, stream])
        for stage, filters in ((2, filters_a), (3, filters_b)):
            stream = Conv2D(filters, (3, 3),
                            padding='same',
                            kernel_initializer='he_normal',
                            name='conv%d_%d' % (level, stage))(stream)
            # BN/Activation indices trail the conv index by one at this level.
            stream = BatchNormalization(
                name='BN%d_%d' % (level, stage - 1))(stream)
            stream = Activation(
                'relu', name='Activation%d_%d' % (level, stage - 1))(stream)
        return stream

    with tf.device('/device:GPU:0'):
        inputs = Input((192, 192, 3), name='Input')

        conv1 = _enc_block(inputs, 64, 1)
        pool1 = MaxPooling2D(pool_size=(2, 2), name='MaxPool1')(conv1)

        conv2 = _enc_block(pool1, 128, 2)
        pool2 = MaxPooling2D(pool_size=(2, 2), name='MaxPool2')(conv2)

        conv3 = _enc_block(pool2, 192, 3)
        pool3 = MaxPooling2D(pool_size=(2, 2), name='MaxPool3')(conv3)

        conv4 = _enc_block(pool3, 256, 4)
        drop4 = Dropout(0.2, name='Dropout1')(conv4)
        pool4 = MaxPooling2D(pool_size=(2, 2), name='MaxPool4')(drop4)

        # Bottleneck: no pooling, dropout only.
        conv5 = _enc_block(pool4, 512, 5)
        drop5 = Dropout(0.2, name='Dropout2')(conv5)

    with tf.device('/device:GPU:1'):
        conv6 = _dec_block(drop5, drop4, 384, 256, 224, 6)
        conv7 = _dec_block(conv6, conv3, 224, 192, 160, 7)
        conv8 = _dec_block(conv7, conv2, 128, 96, 64, 8)
        conv9 = _dec_block(conv8, conv1, 64, 32, 16, 9)

        # 1x1 conv collapses features to a single sigmoid mask channel.
        conv10 = Conv2D(1, 1, activation='sigmoid', name='conv10_1')(conv9)

        model = Model(inputs, conv10)

        model.compile(loss='binary_crossentropy',
                      optimizer=SGD(lr=0.003, momentum=0.9, nesterov=True),
                      metrics=[dice_coef, jaccard_coef, 'acc'])

    return model
예제 #9
0
def segnet(nClasses, optimizer=None, input_height=360, input_width=480):
    """SegNet-style encoder/decoder topped with a CRF-RNN refinement layer.

    Args:
        nClasses: number of segmentation classes.
        optimizer: accepted for signature compatibility with sibling
            builders; not used here (the returned model is not compiled).
        input_height: input image height in pixels.
        input_width: input image width in pixels.

    Returns:
        Uncompiled keras Model mapping an RGB image to per-pixel softmax
        scores of shape (outputHeight * outputWidth, nClasses).  The model
        carries `outputHeight`/`outputWidth` attributes for callers.
    """
    kernel = 3
    filter_size = 64
    pad = 1
    pool_size = 2

    img_input = Input(shape=(input_height, input_width, 3))

    # encoder: four Conv-BN-ReLU stages, pooling after the first three
    x = ZeroPadding2D(padding=(pad, pad))(img_input)
    x = Convolution2D(filter_size, (kernel, kernel), padding='valid')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    l1 = x  # kept for the (currently disabled) skip connections below
    x = MaxPooling2D(pool_size=(pool_size, pool_size))(x)

    x = ZeroPadding2D(padding=(pad, pad))(x)
    x = Convolution2D(128, (kernel, kernel), padding='valid')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    l2 = x
    x = MaxPooling2D(pool_size=(pool_size, pool_size))(x)

    x = ZeroPadding2D(padding=(pad, pad))(x)
    x = Convolution2D(256, (kernel, kernel), padding='valid')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    l3 = x
    x = MaxPooling2D(pool_size=(pool_size, pool_size))(x)

    x = ZeroPadding2D(padding=(pad, pad))(x)
    x = Convolution2D(512, (kernel, kernel), padding='valid')(x)
    x = BatchNormalization()(x)
    l4 = x
    x = Activation('relu')(x)

    # decoder: mirror of the encoder with upsampling instead of pooling
    x = ZeroPadding2D(padding=(pad, pad))(x)
    x = Convolution2D(512, (kernel, kernel), padding='valid')(x)
    x = BatchNormalization()(x)

    #    x = Add()([l4, x])
    x = UpSampling2D(size=(pool_size, pool_size))(x)
    x = ZeroPadding2D(padding=(pad, pad))(x)
    x = Convolution2D(256, (kernel, kernel), padding='valid')(x)
    x = BatchNormalization()(x)

    #   x = Add()([l3, x])
    x = UpSampling2D(size=(pool_size, pool_size))(x)
    x = ZeroPadding2D(padding=(pad, pad))(x)
    x = Convolution2D(128, (kernel, kernel), padding='valid')(x)
    x = BatchNormalization()(x)

    #  x = Add()([l2, x])
    x = UpSampling2D(size=(pool_size, pool_size))(x)
    x = ZeroPadding2D(padding=(pad, pad))(x)
    x = Convolution2D(filter_size, (kernel, kernel), padding='valid')(x)
    x = BatchNormalization()(x)

    # x = Add()([l1, x])
    x = Convolution2D(nClasses, (1, 1), padding='valid')(x)

    # CRF-as-RNN inference refines the raw per-pixel class scores
    out = CrfRnnLayer(image_dims=(input_height, input_width),
                      num_classes=nClasses,
                      theta_alpha=160.,
                      theta_beta=3.,
                      theta_gamma=3.,
                      num_iterations=5,
                      name='crfrnn')([x, img_input])

    # intermediate model built only to read the concrete output shape
    a = Model(inputs=img_input, outputs=out)
    a.outputHeight = a.output_shape[1]
    a.outputWidth = a.output_shape[2]

    # NOTE(review): input_shape= here is written channels-first while the
    # graph above is channels-last; Reshape on an already-connected tensor
    # ignores it, but worth confirming against the data pipeline.
    out = Reshape((a.outputHeight * a.outputWidth, nClasses),
                  input_shape=(nClasses, a.outputHeight, a.outputWidth))(out)
    out = Activation('softmax')(out)
    model = Model(inputs=img_input, outputs=out)
    model.outputHeight = a.outputHeight
    model.outputWidth = a.outputWidth

    return model
예제 #10
0
def Unet_HC(shp=cfg.shape_train_data, weights_path=cfg.path_load_weights):
    """U-Net with an auxiliary hypercolumn output head (Keras 1 API).

    Args:
        shp: input tensor shape; channels-first is assumed, consistent with
            the concat_axis=1 merges below.
        weights_path: path to pre-trained weights; '' skips loading.

    Returns:
        Compiled two-output Model: 'main_output' (U-Net decoder, 3
        channels) and 'aux_output' (hypercolumn head, shp[0] channels),
        both trained with the Huber loss, weighted 0.8/0.2.
    """
    print('-- Current file: model.py -- Unet_HC model')

    act = 'elu'

    inputs = Input(shp, name='main_input')

    # Encoder: paired 3x3 convolutions with 2x2 max-pooling between levels.
    conv1 = Convolution2D(32, 3, 3, activation=act, border_mode='same')(inputs)
    conv1 = Convolution2D(32, 3, 3, activation=act, border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Convolution2D(64, 3, 3, activation=act, border_mode='same')(pool1)
    conv2 = Convolution2D(64, 3, 3, activation=act, border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Convolution2D(128, 3, 3, activation=act, border_mode='same')(pool2)
    conv3 = Convolution2D(128, 3, 3, activation=act, border_mode='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Convolution2D(256, 3, 3, activation=act, border_mode='same')(pool3)
    conv4 = Convolution2D(256, 3, 3, activation=act, border_mode='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = Convolution2D(512, 3, 3, activation=act, border_mode='same')(pool4)
    conv5 = Convolution2D(512, 3, 3, activation=act, border_mode='same')(conv5)

    # Hypercolumns: upsample shallow feature maps to the input resolution
    # and stack them; only levels 1-3 are used (4-5 left disabled).
    #hc_conv5 = UpSampling2D(size=(16, 16))(conv5) #8x8, f = 16
    #hc_conv4 = UpSampling2D(size=(8, 8))(conv4) #16x16, f = 8
    hc_conv3 = UpSampling2D(size=(4, 4))(conv3)  #32x32, f = 4
    hc_conv2 = UpSampling2D(size=(2, 2))(conv2)  #64x64, f = 2
    hc = merge([conv1, hc_conv2, hc_conv3], mode='concat',
               concat_axis=1)  #(None, 992, 128, 128)
    hc_red_conv1 = Convolution2D(128,
                                 3,
                                 3,
                                 border_mode='same',
                                 init='he_normal',
                                 activation=act)(hc)
    hc_red_conv2 = Convolution2D(64,
                                 3,
                                 3,
                                 border_mode='same',
                                 init='he_normal',
                                 activation=act)(hc_red_conv1)
    hc_red_conv3 = Convolution2D(shp[0],
                                 1,
                                 1,
                                 init='he_normal',
                                 activation='linear',
                                 name='aux_output')(hc_red_conv2)

    # Decoder: upsample, concatenate with the encoder skip, convolve twice.
    up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4],
                mode='concat',
                concat_axis=1)
    conv6 = Convolution2D(256, 3, 3, activation=act, border_mode='same')(up6)
    conv6 = Convolution2D(256, 3, 3, activation=act, border_mode='same')(conv6)

    up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3],
                mode='concat',
                concat_axis=1)
    conv7 = Convolution2D(128, 3, 3, activation=act, border_mode='same')(up7)
    conv7 = Convolution2D(128, 3, 3, activation=act, border_mode='same')(conv7)

    up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2],
                mode='concat',
                concat_axis=1)
    conv8 = Convolution2D(64, 3, 3, activation=act, border_mode='same')(up8)
    conv8 = Convolution2D(64, 3, 3, activation=act, border_mode='same')(conv8)

    up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1],
                mode='concat',
                concat_axis=1)
    conv9 = Convolution2D(32, 3, 3, activation=act, border_mode='same')(up9)
    conv9 = Convolution2D(32, 3, 3, activation=act, border_mode='same')(conv9)

    conv10 = Convolution2D(3, 1, 1, activation='linear',
                           name='main_output')(conv9)

    model = Model(input=inputs, output=[conv10, hc_red_conv3])

    # '!=' replaces the Python-2-only '<>' operator (same semantics).
    if weights_path != '':
        model.load_weights(weights_path)

    model.compile(optimizer=Adam(lr=cfg.learning_rate),
                  loss={
                      'main_output': huber,
                      'aux_output': huber
                  },
                  loss_weights={
                      'main_output': 0.8,
                      'aux_output': 0.2
                  })

    return model
예제 #11
0
    def __init__(self, digit_size, num_classes, latent_dim, sess_name=''):
        """Build a conditional GAN as a TF1 graph with Keras layers.

        Args:
            digit_size: height/width of the square single-channel images.
            num_classes: length of the one-hot label vector.
            latent_dim: size of the generator's noise input.
            sess_name: if non-empty, name of a checkpoint (relative to the
                current directory) restored after variable initialization.
        """

        self.sess = tf.Session()
        self.digit_size = digit_size
        self.latent_dim = latent_dim

        # Raw placeholders: a = image, b = one-hot label, c = latent noise.
        self.a = tf.placeholder(tf.float32,
                                shape=(None, self.digit_size, self.digit_size,
                                       1))
        self.b = tf.placeholder(tf.float32, shape=(None, num_classes))
        self.c = tf.placeholder(tf.float32, shape=(None, latent_dim))

        # Wrap the placeholders so Keras layers can consume them.
        self.img = Input(tensor=self.a)
        self.lbls = Input(tensor=self.b)
        self.z = Input(tensor=self.c)

        ############## Build Discriminator #################

        # Variable scope names are reused below to split trainable
        # variables between the two optimizers.
        with tf.variable_scope('discriminator'):

            x = Conv2D(128,
                       kernel_size=(7, 7),
                       strides=(2, 2),
                       padding='same',
                       activation='relu')(self.img)
            # Condition the discriminator by injecting the label features.
            x = self.add_units_to_conv2d(x, self.lbls)

            x = MaxPooling2D((2, 2), padding='same')(x)

            x = Conv2D(64,
                       kernel_size=(3, 3),
                       padding='same',
                       activation='relu')(x)
            x = MaxPooling2D((2, 2), padding='same')(x)

            x = Conv2D(16,
                       kernel_size=(3, 3),
                       padding='same',
                       activation='relu')(x)

            h = Flatten()(x)
            disc_output = Dense(1, activation='sigmoid')(h)

        self.discriminator = Model([self.img, self.lbls], disc_output)

        ############# Build Generator #####################

        with tf.variable_scope('generator'):

            # Condition the generator on the label by concatenation.
            x = concatenate([self.z, self.lbls])

            x = Dense(7 * 7 * 128, activation='relu')(x)
            x = Reshape((7, 7, 128))(x)
            x = UpSampling2D(size=(2, 2))(x)

            x = Conv2D(64,
                       kernel_size=(5, 5),
                       padding='same',
                       activation='relu')(x)

            x = Conv2D(32,
                       kernel_size=(3, 3),
                       padding='same',
                       activation='relu')(x)

            x = UpSampling2D(size=(2, 2))(x)

            gen_output = Conv2D(1,
                                kernel_size=(5, 5),
                                activation='sigmoid',
                                padding='same')(x)

        self.generator = Model([self.z, self.lbls], gen_output)

        ################ GAN ##################

        self.generated_img = self.generator([self.z, self.lbls])

        self.discr_real_img = self.discriminator([self.img, self.lbls])

        self.discr_fake_img = self.discriminator(
            [self.generated_img, self.lbls])

        #self.cgan = Model([self.img, self.lbls], discr_fake_img)

        ############## Define Losses #################

        # 1e-10 guards against log(0).
        log_d_img = tf.reduce_mean(-tf.log(self.discr_real_img + 1e-10))
        log_d_gen_img = tf.reduce_mean(-tf.log(1. - self.discr_fake_img +
                                               1e-10))

        # Saturating minimax generator loss: minimize log(1 - D(G(z))).
        self.GenLoss = -log_d_gen_img
        self.DiscrLoss = 0.5 * (log_d_gen_img + log_d_img)

        ############## Optimizers #############
        GenOpt = tf.train.RMSPropOptimizer(0.0003)
        DiscrOpt = tf.train.RMSPropOptimizer(0.0001)

        # Each optimizer only updates the variables of its own scope.
        Gen_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                     'generator')
        Discr_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                       'discriminator')

        self.gen_step = GenOpt.minimize(self.GenLoss, var_list=Gen_vars)
        self.discr_step = DiscrOpt.minimize(self.DiscrLoss,
                                            var_list=Discr_vars)

        self.saver = tf.train.Saver()

        self.sess.run(tf.global_variables_initializer())

        if (sess_name):
            self.saver.restore(self.sess, './' + sess_name)
예제 #12
0
def EURNet(shp=cfg.shape_train_data, weights_path=cfg.path_load_weights):
    """Inception-block U-Net with an auxiliary hypercolumn head.

    Args:
        shp: input tensor shape; channels-first is assumed, consistent with
            the concat_axis=1 merges below.
        weights_path: path to pre-trained weights; '' skips loading.

    Returns:
        Compiled two-output Model: 'main_output' (decoder) and
        'aux_output' (hypercolumn head), both trained with the Huber
        loss, weighted 0.8/0.2.
    """
    print('-- Current file: model.py -- EURNet model')

    split = True

    #Encoder
    inputs = Input(shp, name='main_input')
    conv1 = inception_block(inputs, 32, batch_mode=2, split=split)

    # Strided convolutions downsample instead of max-pooling.
    pool1 = custom_convolution2D(32,
                                 3,
                                 3,
                                 border_mode='same',
                                 subsample=(2, 2))(conv1)
    pool1 = Dropout(0.5)(pool1)

    conv2 = inception_block(pool1, 64, batch_mode=2, split=split)
    pool2 = custom_convolution2D(64,
                                 3,
                                 3,
                                 border_mode='same',
                                 subsample=(2, 2))(conv2)
    pool2 = Dropout(0.5)(pool2)

    conv3 = inception_block(pool2, 128, batch_mode=2, split=split)
    pool3 = custom_convolution2D(128,
                                 3,
                                 3,
                                 border_mode='same',
                                 subsample=(2, 2))(conv3)
    pool3 = Dropout(0.5)(pool3)

    conv4 = inception_block(pool3, 256, batch_mode=2, split=split)
    pool4 = custom_convolution2D(256,
                                 3,
                                 3,
                                 border_mode='same',
                                 subsample=(2, 2))(conv4)
    pool4 = Dropout(0.5)(pool4)

    conv5 = inception_block(pool4, 512, batch_mode=2, split=split)
    conv5 = Dropout(0.5)(conv5)

    #Hypercolumns
    # Upsample levels 2-3 to input resolution and stack with level 1.
    hc_conv3 = UpSampling2D(size=(4, 4))(conv3)
    hc_conv2 = UpSampling2D(size=(2, 2))(conv2)
    hc = merge([conv1, hc_conv2, hc_conv3], mode='concat', concat_axis=1)

    i1 = inception_block(hc, 128, batch_mode=2, split=split)
    i1 = Dropout(0.5)(i1)
    i2 = inception_block(i1, 64, batch_mode=2, split=split)
    i2 = Dropout(0.5)(i2)
    hc_red_conv3 = Convolution2D(shp[0],
                                 1,
                                 1,
                                 init='he_normal',
                                 activation='linear',
                                 name='aux_output')(
                                     i2)  #W_regularizer=l2(0.01)

    #Decoder
    up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4],
                mode='concat',
                concat_axis=1)
    conv6 = inception_block(up6, 256, batch_mode=2, split=split)
    conv6 = Dropout(0.5)(conv6)

    up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3],
                mode='concat',
                concat_axis=1)
    conv7 = inception_block(up7, 128, batch_mode=2, split=split)
    conv7 = Dropout(0.5)(conv7)

    up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2],
                mode='concat',
                concat_axis=1)
    conv8 = inception_block(up8, 64, batch_mode=2, split=split)
    conv8 = Dropout(0.5)(conv8)

    up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1],
                mode='concat',
                concat_axis=1)
    conv9 = inception_block(up9, 32, batch_mode=2, split=split)
    conv9 = Dropout(0.5)(conv9)
    conv10 = Convolution2D(shp[0],
                           1,
                           1,
                           init='he_normal',
                           activation='linear',
                           name='main_output')(conv9)  #W_regularizer=l2(0.01)

    model = Model(input=inputs, output=[conv10, hc_red_conv3])

    # '!=' replaces the Python-2-only '<>' operator (same semantics).
    if weights_path != '':
        model.load_weights(weights_path)

    model.compile(optimizer=Adam(lr=cfg.learning_rate),
                  loss={
                      'main_output': huber,
                      'aux_output': huber
                  },
                  loss_weights={
                      'main_output': 0.8,
                      'aux_output': 0.2
                  })

    return model
예제 #13
0
def HC(shp=cfg.shape_train_data, weights_path=cfg.path_load_weights):
    """Hypercolumn-only model: encoder plus a single hypercolumn head.

    Args:
        shp: input tensor shape; channels-first is assumed, consistent with
            the concat_axis=1 merge below.
        weights_path: path to pre-trained weights; '' skips loading.

    Returns:
        Compiled single-output Model ('aux_output', shp[0] channels)
        trained with the Huber loss and gradient-norm clipping.
    """
    print('-- Current file: model.py -- HC model')

    split = True

    # Encoder
    inputs = Input(shp)
    conv1 = inception_block(inputs, 32, batch_mode=2, split=split)

    # Strided convolutions downsample instead of max-pooling.
    pool1 = custom_convolution2D(32,
                                 3,
                                 3,
                                 border_mode='same',
                                 subsample=(2, 2))(conv1)
    pool1 = Dropout(0.5)(pool1)

    conv2 = inception_block(pool1, 64, batch_mode=2, split=split)
    pool2 = custom_convolution2D(64,
                                 3,
                                 3,
                                 border_mode='same',
                                 subsample=(2, 2))(conv2)
    pool2 = Dropout(0.5)(pool2)

    conv3 = inception_block(pool2, 128, batch_mode=2, split=split)
    pool3 = custom_convolution2D(128,
                                 3,
                                 3,
                                 border_mode='same',
                                 subsample=(2, 2))(conv3)
    pool3 = Dropout(0.5)(pool3)

    conv4 = inception_block(pool3, 256, batch_mode=2, split=split)
    pool4 = custom_convolution2D(256,
                                 3,
                                 3,
                                 border_mode='same',
                                 subsample=(2, 2))(conv4)
    pool4 = Dropout(0.5)(pool4)

    conv5 = inception_block(pool4, 512, batch_mode=2, split=split)
    conv5 = Dropout(0.5)(conv5)

    # Hypercolumns: upsample every level to input resolution and stack.
    hc_conv5 = UpSampling2D(size=(16, 16))(conv5)  #8x8, f = 16
    hc_conv4 = UpSampling2D(size=(8, 8))(conv4)  #16x16, f = 8
    hc_conv3 = UpSampling2D(size=(4, 4))(conv3)  #32x32, f = 4
    hc_conv2 = UpSampling2D(size=(2, 2))(conv2)  #64x64, f = 2

    hc = merge([conv1, hc_conv2, hc_conv3, hc_conv4, hc_conv5],
               mode='concat',
               concat_axis=1)
    #hc = merge([conv1, hc_conv2, hc_conv3], mode='concat', concat_axis=1)

    i1 = inception_block(hc, 128, batch_mode=2, split=split)
    i1 = Dropout(0.5)(i1)
    i2 = inception_block(i1, 64, batch_mode=2, split=split)
    i2 = Dropout(0.5)(i2)
    hc_red_conv3 = Convolution2D(shp[0],
                                 1,
                                 1,
                                 init='he_normal',
                                 activation='linear',
                                 name='aux_output')(i2)

    model = Model(input=inputs, output=[hc_red_conv3])

    # '!=' replaces the Python-2-only '<>' operator (same semantics).
    if weights_path != '':
        model.load_weights(weights_path)

    model.compile(loss=huber, optimizer=Adam(clipnorm=1.))

    return model
예제 #14
0
def SUnet(shp=cfg.shape_train_data, weights_path=cfg.path_load_weights):
    """U-Net built from (non-split) inception blocks.

    Args:
        shp: input tensor shape; channels-first is assumed, consistent with
            the concat_axis=1 merges below.
        weights_path: path to pre-trained weights; '' skips loading.

    Returns:
        Compiled single-output Model (shp[0]-channel sigmoid map) trained
        with the Huber loss.
    """
    print('-- Current file: model.py -- Unet model with inception modules')

    split = False

    # Encoder
    inputs = Input(shp)
    conv1 = inception_block(inputs, 32, batch_mode=2, split=split)

    # Strided convolutions downsample instead of max-pooling.
    pool1 = custom_convolution2D(32,
                                 3,
                                 3,
                                 border_mode='same',
                                 subsample=(2, 2))(conv1)
    pool1 = Dropout(0.5)(pool1)

    conv2 = inception_block(pool1, 64, batch_mode=2, split=split)
    pool2 = custom_convolution2D(64,
                                 3,
                                 3,
                                 border_mode='same',
                                 subsample=(2, 2))(conv2)
    pool2 = Dropout(0.5)(pool2)

    conv3 = inception_block(pool2, 128, batch_mode=2, split=split)
    pool3 = custom_convolution2D(128,
                                 3,
                                 3,
                                 border_mode='same',
                                 subsample=(2, 2))(conv3)
    pool3 = Dropout(0.5)(pool3)

    conv4 = inception_block(pool3, 256, batch_mode=2, split=split)
    pool4 = custom_convolution2D(256,
                                 3,
                                 3,
                                 border_mode='same',
                                 subsample=(2, 2))(conv4)
    pool4 = Dropout(0.5)(pool4)

    conv5 = inception_block(pool4, 512, batch_mode=2, split=split)
    conv5 = Dropout(0.5)(conv5)

    # Decoder: upsample, concatenate with the encoder skip, inception block.
    up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4],
                mode='concat',
                concat_axis=1)
    conv6 = inception_block(up6, 256, batch_mode=2, split=split)
    conv6 = Dropout(0.5)(conv6)

    up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3],
                mode='concat',
                concat_axis=1)
    conv7 = inception_block(up7, 128, batch_mode=2, split=split)
    conv7 = Dropout(0.5)(conv7)

    up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2],
                mode='concat',
                concat_axis=1)
    conv8 = inception_block(up8, 64, batch_mode=2, split=split)
    conv8 = Dropout(0.5)(conv8)

    up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1],
                mode='concat',
                concat_axis=1)
    conv9 = inception_block(up9, 32, batch_mode=2, split=split)
    conv9 = Dropout(0.5)(conv9)

    conv10 = Convolution2D(shp[0],
                           1,
                           1,
                           init='he_normal',
                           activation='sigmoid')(conv9)

    model = Model(input=inputs, output=[conv10])

    # '!=' replaces the Python-2-only '<>' operator (same semantics).
    if weights_path != '':
        model.load_weights(weights_path)

    model.compile(loss=huber, optimizer=Adam(lr=cfg.learning_rate))

    return model
예제 #15
0
def segnet(nClasses, optimizer=None, input_height=360, input_width=480):
    """Sequential SegNet-basic (Keras 1 API, channels-first input).

    Args:
        nClasses: number of segmentation classes.
        optimizer: if given, the model is compiled with categorical
            cross-entropy and an accuracy metric; otherwise it is
            returned uncompiled.
        input_height: input image height in pixels.
        input_width: input image width in pixels.

    Returns:
        Model producing per-pixel softmax scores of shape
        (pixels, nClasses); carries `outputHeight`/`outputWidth`
        attributes with the pre-reshape spatial size.
    """

    kernel = 3
    filter_size = 64
    pad = 1
    pool_size = 2

    model = models.Sequential()
    model.add(Layer(input_shape=(3, input_height, input_width)))

    # encoder: Pad-Conv-BN-ReLU stages, pooling after the first three
    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(filter_size, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))

    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(128, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))

    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(256, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))

    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(512, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    # decoder: mirror of the encoder with upsampling instead of pooling
    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(512, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())

    model.add(UpSampling2D(size=(pool_size, pool_size)))
    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(256, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())

    model.add(UpSampling2D(size=(pool_size, pool_size)))
    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(128, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())

    model.add(UpSampling2D(size=(pool_size, pool_size)))
    model.add(ZeroPadding2D(padding=(pad, pad)))
    model.add(Convolution2D(filter_size, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())

    # 1x1 conv collapses features to per-class score maps
    model.add(Convolution2D(
        nClasses,
        1,
        1,
        border_mode='valid',
    ))

    # record the spatial size before it is flattened away by the Reshape
    model.outputHeight = model.output_shape[-2]
    model.outputWidth = model.output_shape[-1]

    model.add(
        Reshape((nClasses, model.output_shape[-2] * model.output_shape[-1]),
                input_shape=(nClasses, model.output_shape[-2],
                             model.output_shape[-1])))

    model.add(Permute((2, 1)))
    model.add(Activation('softmax'))

    # idiomatic identity test instead of `!= None`
    if optimizer is not None:
        model.compile(loss="categorical_crossentropy",
                      optimizer=optimizer,
                      metrics=['accuracy'])

    return model
예제 #16
0
    res8 = seperableConv_bottleneck_block_with_se(pool4,
                                                  filters=384,
                                                  cardinality=8,
                                                  strides=1,
                                                  weight_decay=5e-4)
    res8 = seperableConv_bottleneck_block_with_se(
        res8, filters=384, cardinality=32, strides=1,
        weight_decay=5e-4)  #12x12x384

with tf.device('/device:GPU:3'):
    up1 = Conv2D(256, (3, 3),
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(
                     UpSampling2D(size=(2, 2))(res8))  #24x24x256
    merge1 = keras.layers.Add()([res7, up1])
    upres8 = seperableConv_bottleneck_block_with_se(
        merge1, filters=256, cardinality=32, strides=1,
        weight_decay=5e-4)  #24x24x256

    up2 = Conv2D(192, (3, 3),
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(
                     UpSampling2D(size=(2, 2))(upres8))  #48x48x192
    merge2 = keras.layers.Add()([res6, up2])
    upres7 = seperableConv_bottleneck_block_with_se(
        merge2, filters=192, cardinality=32, strides=1,
        weight_decay=5e-4)  #48x48x192
예제 #17
0
def isensee2017(nClasses,shape,W,lr, n_base_filters=16, depth=5, dropout_rate=0.3,
                      n_segmentation_levels=3, optimizer=Adam, initial_learning_rate=5e-4,
                      loss_function=weighted_dice_coefficient_loss, activation_name="sigmoid"):
    """
    Build the 2D segmentation model proposed by Isensee et al. for the BRATS 2017 competition:
    https://www.cbica.upenn.edu/sbia/Spyridon.Bakas/MICCAI_BraTS/MICCAI_BraTS_2017_proceedings_shortPapers.pdf

    This network is highly similar to the model proposed by Kayalibay et al. "CNN-based Segmentation of Medical
    Imaging Data", 2017: https://arxiv.org/pdf/1701.03056.pdf

    :param nClasses: number of output segmentation channels (1x1 conv heads).
    :param shape: spatial input shape passed straight to ``Input``.
    :param W: path of a weights file to load, or '' to skip loading.
    :param lr: NOTE(review) - currently unused; compilation uses
        ``initial_learning_rate`` instead. Confirm which one callers expect.
    :param n_base_filters: filters at the first level, doubled at each deeper level.
    :param depth: number of encoder (context) levels.
    :param dropout_rate: dropout rate inside each context module.
    :param n_segmentation_levels: how many decoder levels contribute a deep-supervision
        segmentation map to the summed output.
    :param optimizer: optimizer *class* (not instance); instantiated with
        lr=initial_learning_rate at compile time.
    :param initial_learning_rate: learning rate for the optimizer.
    :param loss_function: loss passed to ``model.compile``.
    :param activation_name: final activation applied to the summed maps.
    :return: the compiled Keras model.
    """
    inputs = Input(shape)

    # ---- Encoder (context pathway) ------------------------------------
    current_layer = inputs
    level_output_layers = list()
    level_filters = list()
    for level_number in range(depth):
        n_level_filters = (2**level_number) * n_base_filters
        level_filters.append(n_level_filters)

        # First level keeps full resolution; deeper levels downsample by 2.
        if current_layer is inputs:
            in_conv = create_convolution_block(current_layer, n_level_filters)
        else:
            in_conv = create_convolution_block(current_layer, n_level_filters, strides=(2, 2))

        context_output_layer = create_context_module(in_conv, n_level_filters, dropout_rate=dropout_rate)

        # Residual connection around the context module.
        summation_layer = Add()([in_conv, context_output_layer])
        level_output_layers.append(summation_layer)
        current_layer = summation_layer

    # ---- Decoder (localization pathway) -------------------------------
    segmentation_layers = list()
    for level_number in range(depth - 2, -1, -1):
        up_sampling = create_up_sampling_module(current_layer, level_filters[level_number])
        concatenation_layer = concatenate([level_output_layers[level_number], up_sampling], axis=3)
        localization_output = create_localization_module(concatenation_layer, level_filters[level_number])
        current_layer = localization_output
        # Collect per-level 1x1 segmentation maps for deep supervision.
        if level_number < n_segmentation_levels:
            segmentation_layers.insert(0, create_convolution_block(current_layer, n_filters=nClasses, kernel=(1, 1)))

    # Sum the per-level maps from coarse to fine, upsampling between levels.
    output_layer = None
    for level_number in reversed(range(n_segmentation_levels)):
        segmentation_layer = segmentation_layers[level_number]
        if output_layer is None:
            output_layer = segmentation_layer
        else:
            output_layer = Add()([output_layer, segmentation_layer])

        if level_number > 0:
            output_layer = UpSampling2D(size=(2, 2))(output_layer)

    activation_block = Activation(activation_name)(output_layer)

    model = Model(inputs=inputs, outputs=activation_block)
    if W !='':
        model.load_weights(W)
    model.compile(optimizer=optimizer(lr=initial_learning_rate), loss=loss_function)
    model.summary()
    return model
예제 #18
0
def segnet(
        input_shape,
        n_labels,
        kernel=3,
        pool_size=(2, 2),
        output_mode="softmax",
        use_residual=False,
        use_argmax=True):
    """Build an (uncompiled) SegNet-style encoder/decoder model.

    :param input_shape: shape passed to ``Input`` (no batch dimension).
    :param n_labels: number of channels of the final 1x1 convolution.
    :param kernel: kernel size used by every 2D convolution.
    :param pool_size: pooling / unpooling window.
    :param output_mode: activation of the output layer (e.g. "softmax").
    :param use_residual: if True, store each encoder pool output and add
        it back into the decoder at the matching resolution.
    :param use_argmax: if True, pool with MaxPoolingWithArgmax2D and
        unpool with MaxUnpooling2D using the saved argmax masks (the
        original SegNet scheme); otherwise use plain MaxPooling2D and
        UpSampling2D.
    :return: an uncompiled ``Model`` named "SegNet".
    """
    # encoder
    inputs = Input(shape=input_shape)

    residual_connections = []

    # Encoder stage 1: two 64-filter conv blocks, then downsample.
    conv_1 = Conv2D(64, kernel, padding="same")(inputs)
    conv_1 = BatchNormalization()(conv_1)
    conv_1 = Activation("relu")(conv_1)
    conv_2 = Conv2D(64, kernel, padding="same")(conv_1)
    conv_2 = BatchNormalization()(conv_2)
    conv_2 = Activation("relu")(conv_2)

    if use_argmax: pool_1, mask_1 = MaxPoolingWithArgmax2D(pool_size)(conv_2)
    else: pool_1 = MaxPooling2D(pool_size)(conv_2)
    if use_residual: residual_connections.append(pool_1)

    # Encoder stage 2: 128 filters.
    conv_3 = Conv2D(128, kernel, padding="same")(pool_1)
    conv_3 = BatchNormalization()(conv_3)
    conv_3 = Activation("relu")(conv_3)
    conv_4 = Conv2D(128, kernel, padding="same")(conv_3)
    conv_4 = BatchNormalization()(conv_4)
    conv_4 = Activation("relu")(conv_4)

    if use_argmax: pool_2, mask_2 = MaxPoolingWithArgmax2D(pool_size)(conv_4)
    else: pool_2 = MaxPooling2D(pool_size)(conv_4)
    if use_residual: residual_connections.append(pool_2)

    # Encoder stage 3: three 256-filter conv blocks (VGG-style).
    conv_5 = Conv2D(256, kernel, padding="same")(pool_2)
    conv_5 = BatchNormalization()(conv_5)
    conv_5 = Activation("relu")(conv_5)
    conv_6 = Conv2D(256, kernel, padding="same")(conv_5)
    conv_6 = BatchNormalization()(conv_6)
    conv_6 = Activation("relu")(conv_6)
    conv_7 = Conv2D(256, kernel, padding="same")(conv_6)
    conv_7 = BatchNormalization()(conv_7)
    conv_7 = Activation("relu")(conv_7)

    if use_argmax: pool_3, mask_3 = MaxPoolingWithArgmax2D(pool_size)(conv_7)
    else: pool_3 = MaxPooling2D(pool_size)(conv_7)
    if use_residual: residual_connections.append(pool_3)

    # Encoder stage 4: three 512-filter conv blocks.
    conv_8 = Conv2D(512, kernel, padding="same")(pool_3)
    conv_8 = BatchNormalization()(conv_8)
    conv_8 = Activation("relu")(conv_8)
    conv_9 = Conv2D(512, kernel, padding="same")(conv_8)
    conv_9 = BatchNormalization()(conv_9)
    conv_9 = Activation("relu")(conv_9)
    conv_10 = Conv2D(512, kernel, padding="same")(conv_9)
    conv_10 = BatchNormalization()(conv_10)
    conv_10 = Activation("relu")(conv_10)

    if use_argmax: pool_4, mask_4 = MaxPoolingWithArgmax2D(pool_size)(conv_10)
    else: pool_4 = MaxPooling2D(pool_size)(conv_10)
    if use_residual: residual_connections.append(pool_4)

    # Encoder stage 5: three more 512-filter conv blocks.
    conv_11 = Conv2D(512, kernel, padding="same")(pool_4)
    conv_11 = BatchNormalization()(conv_11)
    conv_11 = Activation("relu")(conv_11)
    conv_12 = Conv2D(512, kernel, padding="same")(conv_11)
    conv_12 = BatchNormalization()(conv_12)
    conv_12 = Activation("relu")(conv_12)
    conv_13 = Conv2D(512, kernel, padding="same")(conv_12)
    conv_13 = BatchNormalization()(conv_13)
    conv_13 = Activation("relu")(conv_13)

    if use_argmax: pool_5, mask_5 = MaxPoolingWithArgmax2D(pool_size)(conv_13)
    else: pool_5 = MaxPooling2D(pool_size)(conv_13)
    if use_residual: residual_connections.append(pool_5)
    print("Done building encoder..")

    # decoder

    # NOTE(review): residual_connections[-1] *is* pool_5 (appended just
    # above), so this Add doubles pool_5 rather than adding a different
    # tensor — confirm this is intended.
    if use_residual: pool_5 = Add()([pool_5, residual_connections[-1]])
    if use_argmax: unpool_1 = MaxUnpooling2D(pool_size)([pool_5, mask_5])
    else: unpool_1 = UpSampling2D(pool_size)(pool_5)

    # Decoder stage 1: three 512-filter conv blocks.
    conv_14 = Conv2D(512, kernel, padding="same")(unpool_1)
    conv_14 = BatchNormalization()(conv_14)
    conv_14 = Activation("relu")(conv_14)
    conv_15 = Conv2D(512, kernel, padding="same")(conv_14)
    conv_15 = BatchNormalization()(conv_15)
    conv_15 = Activation("relu")(conv_15)
    conv_16 = Conv2D(512, kernel, padding="same")(conv_15)
    conv_16 = BatchNormalization()(conv_16)
    conv_16 = Activation("relu")(conv_16)

    if use_residual: conv_16 = Add()([conv_16, residual_connections[-2]])
    if use_argmax: unpool_2 = MaxUnpooling2D(pool_size)([conv_16, mask_4])
    else: unpool_2 = UpSampling2D(pool_size)(conv_16)

    # Decoder stage 2: 512 -> 512 -> 256 filters.
    conv_17 = Conv2D(512, kernel, padding="same")(unpool_2)
    conv_17 = BatchNormalization()(conv_17)
    conv_17 = Activation("relu")(conv_17)
    conv_18 = Conv2D(512, kernel, padding="same")(conv_17)
    conv_18 = BatchNormalization()(conv_18)
    conv_18 = Activation("relu")(conv_18)
    conv_19 = Conv2D(256, kernel, padding="same")(conv_18)
    conv_19 = BatchNormalization()(conv_19)
    conv_19 = Activation("relu")(conv_19)

    if use_residual: conv_19 = Add()([conv_19, residual_connections[-3]])
    if use_argmax: unpool_3 = MaxUnpooling2D(pool_size)([conv_19, mask_3])
    else: unpool_3 = UpSampling2D(pool_size)(conv_19)

    # Decoder stage 3: 256 -> 256 -> 128 filters.
    conv_20 = Conv2D(256, kernel, padding="same")(unpool_3)
    conv_20 = BatchNormalization()(conv_20)
    conv_20 = Activation("relu")(conv_20)
    conv_21 = Conv2D(256, kernel, padding="same")(conv_20)
    conv_21 = BatchNormalization()(conv_21)
    conv_21 = Activation("relu")(conv_21)
    conv_22 = Conv2D(128, kernel, padding="same")(conv_21)
    conv_22 = BatchNormalization()(conv_22)
    conv_22 = Activation("relu")(conv_22)

    if use_residual: conv_22 = Add()([conv_22, residual_connections[-4]])
    if use_argmax: unpool_4 = MaxUnpooling2D(pool_size)([conv_22, mask_2])
    else: unpool_4 = UpSampling2D(pool_size)(conv_22)

    # Decoder stage 4: 128 -> 64 filters.
    conv_23 = Conv2D(128, kernel, padding="same")(unpool_4)
    conv_23 = BatchNormalization()(conv_23)
    conv_23 = Activation("relu")(conv_23)
    conv_24 = Conv2D(64, kernel, padding="same")(conv_23)
    conv_24 = BatchNormalization()(conv_24)
    conv_24 = Activation("relu")(conv_24)

    if use_residual: conv_24 = Add()([conv_24, residual_connections[-5]])
    if use_argmax: unpool_5 = MaxUnpooling2D(pool_size)([conv_24, mask_1])
    else: unpool_5 = UpSampling2D(pool_size)(conv_24)

    # Decoder stage 5: final 64-filter block at full resolution.
    conv_25 = Conv2D(64, kernel, padding="same")(unpool_5)
    conv_25 = BatchNormalization()(conv_25)
    conv_25 = Activation("relu")(conv_25)

    # 1x1 conv to per-class score maps, then the output activation.
    conv_26 = Conv2D(n_labels, 1, padding="same")(conv_25)
    conv_26 = BatchNormalization()(conv_26)

    outputs = Activation(output_mode)(conv_26)
    print("Done building decoder..")

    model = Model(inputs=inputs, outputs=outputs, name="SegNet")

    return model
예제 #19
0
def SegNet(nClasses,shape,W,lr):
    """Build and compile a small SegNet-like model (legacy Keras 1 API:
    ``Convolution2D(nb_filter, nb_row, nb_col, border_mode=...)``).

    :param nClasses: 1 -> sigmoid head + binary_crossentropy;
        >1 -> flattened (H*W, nClasses) softmax head with
        categorical_crossentropy.
    :param shape: (height, width) of the single-channel input.
    :param W: optional weights file to load ('' to skip).
    :param lr: NOTE(review) - unused; the compile calls below hard-code
        their learning rates (Adam(lr=0.0008) / Adadelta(lr=1.0)).
    :return: the compiled model (also carries outputWidth / outputHeight).
    """
    input_height=shape[0]
    input_width=shape[1]
    kernel = 3
    filter_size = 64
    pad = 1
    pool_size = 2

    model = Sequential()
    model.add(Layer(input_shape=(input_height , input_width , 1)))

    # encoder: ZeroPadding -> valid conv -> BN -> ReLU (-> pool) stages
    model.add(ZeroPadding2D(padding=(pad,pad)))
    model.add(Convolution2D(filter_size, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))

    model.add(ZeroPadding2D(padding=(pad,pad)))
    model.add(Convolution2D(128, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))

    model.add(ZeroPadding2D(padding=(pad,pad)))
    model.add(Convolution2D(256, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))

    model.add(ZeroPadding2D(padding=(pad,pad)))
    model.add(Convolution2D(512, kernel, kernel, border_mode='valid'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))


    # decoder: mirror the encoder, upsampling 2x between conv stages
    model.add( ZeroPadding2D(padding=(pad,pad)))
    model.add( Convolution2D(512, kernel, kernel, border_mode='valid'))
    model.add( BatchNormalization())

    model.add( UpSampling2D(size=(pool_size,pool_size)))
    model.add( ZeroPadding2D(padding=(pad,pad)))
    model.add( Convolution2D(256, kernel, kernel, border_mode='valid'))
    model.add( BatchNormalization())

    model.add( UpSampling2D(size=(pool_size,pool_size)))
    model.add( ZeroPadding2D(padding=(pad,pad)))
    model.add( Convolution2D(128, kernel, kernel, border_mode='valid'))
    model.add( BatchNormalization())

    model.add( UpSampling2D(size=(pool_size,pool_size)))
    model.add( ZeroPadding2D(padding=(pad,pad)))
    model.add( Convolution2D(filter_size, kernel, kernel, border_mode='valid'))
    model.add( BatchNormalization())


    # 1x1 conv to per-class score maps.
    model.add(Convolution2D( nClasses , 1, 1, border_mode='valid'))

    model.outputHeight = model.output_shape[-2]
    model.outputWidth = model.output_shape[-1]

    if nClasses==1:
        model.add(Activation('sigmoid'))
    if nClasses>1:
        # NOTE(review): applying ReLU immediately before a softmax is
        # unusual (it zeroes negative logits) — confirm it is intended.
        model.add(Activation('relu'))
        model.add(Reshape((shape[0] * shape[1],nClasses), input_shape=(shape[0], shape[1],nClasses)))
#        model.add(Permute((2, 1)))
        model.add(Activation('softmax'))
    if W !='':
        model.load_weights(W)
    if nClasses == 1:
        model.compile(loss='binary_crossentropy',optimizer=Adam(lr=0.0008),metrics=['accuracy'])
    if nClasses > 1:
        model.compile(loss='categorical_crossentropy',optimizer=Adadelta(lr=1.0),metrics=['accuracy'])
    model.summary()
    return model
예제 #20
0
    def create_model(self):
        """Build the text-conditioned GAN.

        Side effects: sets ``self.generator`` (noise + text -> image),
        ``self.discriminator`` (image + text -> real/fake probability)
        and ``self.model`` (generator followed by the discriminator,
        which is frozen before the combined model is compiled).
        """
        # Generator starts at 1/4 resolution and doubles it twice below.
        init_img_width = self.img_width // 4
        init_img_height = self.img_height // 4

        random_input = Input(shape=(self.random_input_dim, ))
        text_input1 = Input(shape=(self.text_input_dim, ))
        random_dense = Dense(self.random_input_dim)(random_input)
        text_layer1 = Dense(1024)(text_input1)

        # Fuse noise and text embeddings, then project to the initial
        # 128-channel feature map.
        merged = concatenate([random_dense, text_layer1])
        generator_layer = Activation('tanh')(merged)

        generator_layer = Dense(128 * init_img_width *
                                init_img_height)(generator_layer)
        generator_layer = BatchNormalization()(generator_layer)
        generator_layer = Activation('tanh')(generator_layer)
        generator_layer = Reshape(
            (init_img_width, init_img_height, 128),
            input_shape=(128 * init_img_width *
                         init_img_height, ))(generator_layer)
        # Two upsample+conv stages: 1/4 -> 1/2 -> full resolution.
        generator_layer = UpSampling2D(size=(2, 2))(generator_layer)
        generator_layer = Conv2D(64, kernel_size=5,
                                 padding='same')(generator_layer)
        generator_layer = Activation('tanh')(generator_layer)
        generator_layer = UpSampling2D(size=(2, 2))(generator_layer)
        generator_layer = Conv2D(self.img_channels,
                                 kernel_size=5,
                                 padding='same')(generator_layer)
        generator_output = Activation('tanh')(generator_layer)

        self.generator = Model([random_input, text_input1], generator_output)

        self.generator.compile(loss='mean_squared_error', optimizer="SGD")

        print('generator: ', self.generator.summary())

        # Discriminator: CNN image branch + dense text branch, fused to a
        # single sigmoid real/fake score.
        text_input2 = Input(shape=(self.text_input_dim, ))
        text_layer2 = Dense(1024)(text_input2)

        img_input2 = Input(shape=(self.img_width, self.img_height,
                                  self.img_channels))
        img_layer2 = Conv2D(64, kernel_size=(5, 5), padding='same')(img_input2)
        img_layer2 = Activation('tanh')(img_layer2)
        img_layer2 = MaxPooling2D(pool_size=(2, 2))(img_layer2)
        img_layer2 = Conv2D(128, kernel_size=5)(img_layer2)
        img_layer2 = Activation('tanh')(img_layer2)
        img_layer2 = MaxPooling2D(pool_size=(2, 2))(img_layer2)
        img_layer2 = Flatten()(img_layer2)
        img_layer2 = Dense(1024)(img_layer2)

        merged = concatenate([img_layer2, text_layer2])

        discriminator_layer = Activation('tanh')(merged)
        discriminator_layer = Dense(1)(discriminator_layer)
        discriminator_output = Activation('sigmoid')(discriminator_layer)

        self.discriminator = Model([img_input2, text_input2],
                                   discriminator_output)

        # The discriminator is compiled while still trainable, for its
        # own training updates.
        d_optim = SGD(lr=0.0005, momentum=0.9, nesterov=True)
        self.discriminator.compile(loss='binary_crossentropy',
                                   optimizer=d_optim)

        print('discriminator: ', self.discriminator.summary())

        # Combined model: generator output fed into the discriminator.
        model_output = self.discriminator([self.generator.output, text_input1])

        self.model = Model([random_input, text_input1], model_output)
        # Freeze the discriminator before compiling the combined model so
        # only the generator is updated when training self.model.
        self.discriminator.trainable = False

        g_optim = SGD(lr=0.0005, momentum=0.9, nesterov=True)
        self.model.compile(loss='binary_crossentropy', optimizer=g_optim)

        print('generator-discriminator: ', self.model.summary())
예제 #21
0
def UNet(nClasses,shape,W,lr):
    """Build and compile a classic U-Net for (shape[0], shape[1], 1) inputs.

    :param nClasses: 1 -> 1x1 sigmoid head + binary_crossentropy;
        >1 -> 1x1 conv to nClasses, reshaped to (H*W, nClasses) with a
        softmax and categorical_crossentropy.
    :param shape: (height, width) of the input.
    :param W: optional weights file to load ('' to skip).
    :param lr: learning rate for the Adam optimizer.
    :return: the compiled model (also carries outputWidth / outputHeight).

    NOTE(review): uses the Keras 1 ``merge(..., mode='concat')`` function,
    which was removed in Keras 2 (replaced by ``concatenate``) — this code
    only runs on old Keras versions.
    """

    inputs = Input((shape[0], shape[1],1))

    # Contracting path: 64 -> 128 -> 256 -> 512 -> 1024 filters.
    conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
    print ("conv1 shape:",conv1.shape)
    conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)
    print ("conv1 shape:",conv1.shape)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    print ("pool1 shape:",pool1.shape)

    conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
    print ("conv2 shape:",conv2.shape)
    conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
    print ("conv2 shape:",conv2.shape)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    print ("pool2 shape:",pool2.shape)

    conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
    print ("conv3 shape:",conv3.shape)
    conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
    print ("conv3 shape:",conv3.shape)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    print ("pool3 shape:",pool3.shape)

    conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
    conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

    # Bottleneck with dropout.
    conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)
    conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)
    drop5 = Dropout(0.5)(conv5)

    # Expanding path: upsample, 2x2 conv, concatenate skip, double conv.
    up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))
    merge6 = merge([drop4,up6], mode = 'concat', concat_axis = 3)
    conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)
    conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)

    up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))
    merge7 = merge([conv3,up7], mode = 'concat', concat_axis = 3)
    conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)
    conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)

    up8 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))
    merge8 = merge([conv2,up8], mode = 'concat', concat_axis = 3)
    conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)
    conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)

    up9 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))
    merge9 = merge([conv1,up9], mode = 'concat', concat_axis = 3)
    conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9)
    conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
    # Reduce to 2 channels before the class-specific head below.
    conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)


    # Probe the output spatial size via a throwaway Model.
    o_shape = Model(inputs , conv9 ).output_shape
    outputHeight = o_shape[1]
    outputWidth = o_shape[2]

    if nClasses>1:
        conv10 = Conv2D(nClasses, 1)(conv9)
        Reshape1 = Reshape((shape[0] * shape[1],nClasses), input_shape=(shape[0], shape[1],nClasses))(conv10)
#        o = (Permute((2, 1)))(o)
        o = (Activation('softmax'))(Reshape1)
    if nClasses==1:
        o = Conv2D(1, 1, activation = 'sigmoid')(conv9)
    model = Model( inputs , o )
    model.outputWidth = outputWidth
    model.outputHeight = outputHeight
    if W !='':
        model.load_weights(W)
    if nClasses>1:
        model.compile(loss="categorical_crossentropy", optimizer=Adam(lr =lr) , metrics=['categorical_accuracy'] )
    if nClasses==1:
        model.compile(loss="binary_crossentropy", optimizer = Adam(lr = lr) , metrics=['accuracy'] )
    model.summary()  
    return model
예제 #22
0
def generate_model(num_classes,
                   num_channel=3,
                   input_size=(300, 400),
                   output_size=(300, 400)):
    """Build and compile a channels-first U-Net for pixel-wise classification.

    :param num_classes: number of output classes (softmax over classes).
    :param num_channel: input channels (channels-first layout).
    :param input_size: spatial (rows, cols) of the input.
    :param output_size: spatial (rows, cols) of the output map.
    :return: compiled Model whose output has shape output_size + (num_classes,).
    """

    def _double_conv(tensor, filters):
        # Two (3x3 conv -> PReLU -> BatchNorm on the channel axis) units.
        for _ in range(2):
            tensor = Conv2D(filters, kernel_size=(3, 3), padding='same')(tensor)
            tensor = BatchNormalization(axis=1)(PReLU()(tensor))
        return tensor

    def _upsample(tensor, filters):
        # 2x upsampling followed by a 2x2 "up-convolution".
        return Conv2D(filters, kernel_size=(2, 2),
                      padding='same')(UpSampling2D(size=(2, 2))(tensor))

    inputs = Input((num_channel, ) + input_size)

    # Contracting path: double-conv then 2x2 max-pool at each level.
    enc1 = _double_conv(inputs, 64)
    enc2 = _double_conv(MaxPooling2D(pool_size=(2, 2))(enc1), 128)
    enc3 = _double_conv(MaxPooling2D(pool_size=(2, 2))(enc2), 256)
    enc4 = _double_conv(MaxPooling2D(pool_size=(2, 2))(enc3), 512)
    bottom = _double_conv(MaxPooling2D(pool_size=(2, 2))(enc4), 1024)

    # Expanding path: upsample, concatenate the skip connection on the
    # channel axis (axis=1, channels-first), then double-conv.
    dec6 = _double_conv(concatenate([enc4, _upsample(bottom, 512)], axis=1), 512)
    dec7 = _double_conv(concatenate([enc3, _upsample(dec6, 256)], axis=1), 256)
    dec8 = _double_conv(concatenate([enc2, _upsample(dec7, 128)], axis=1), 128)
    dec9 = _double_conv(concatenate([enc1, _upsample(dec8, 64)], axis=1), 64)

    # 1x1 conv to class scores, flatten the spatial dims, softmax over
    # classes, then restore the spatial layout (channels-last on the way out).
    pred = PReLU()(Conv2D(num_classes, kernel_size=(1, 1))(dec9))
    pred = Reshape((num_classes, output_size[0] * output_size[1]))(pred)
    pred = Permute((2, 1))(pred)
    pred = Activation('softmax')(pred)
    pred = Reshape(output_size + (num_classes, ))(pred)

    model = Model(inputs=inputs, outputs=pred)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['categorical_accuracy'])

    return model
예제 #23
0
def generator_unet_upsampling(img_dim,
                              bn_mode,
                              model_name="generator_unet_upsampling"):
    """Build a U-Net generator whose decoder upsamples with UpSampling2D.

    :param img_dim: input image dimensions; interpreted as channels-first
        or channels-last depending on the backend dim ordering.
    :param bn_mode: batch-norm mode forwarded to the conv-block helpers.
    :param model_name: conventional name argument
        (NOTE(review): not currently passed to ``Model``).
    :return: an uncompiled Keras ``Model`` producing tanh-activated images.
    """
    nb_filters = 64

    # Resolve channel axis / channel count / smallest spatial side for the
    # backend image ordering ("th" = channels first).
    if K.image_dim_ordering() == "th":
        bn_axis = 1
        nb_channels = img_dim[0]
        min_s = min(img_dim[1:])
    else:
        bn_axis = -1
        nb_channels = img_dim[-1]
        min_s = min(img_dim[:-1])

    unet_input = Input(shape=img_dim, name="unet_input")

    # Prepare encoder filters: one stride-2 conv per halving of min_s,
    # with filter counts 64, 128, ... capped at 64 * 8.
    nb_conv = int(np.floor(np.log(min_s) / np.log(2)))
    if VERBOSE: print(nb_conv, "number of convolutions")
    list_nb_filters = [nb_filters * min(8, (2**i)) for i in range(nb_conv)]

    # Encoder
    xw_wid, yw_wid = _get_conv_size(unet_input)
    list_encoder = [
        Convolution2D(list_nb_filters[0],
                      xw_wid,
                      yw_wid,
                      subsample=(2, 2),
                      name="unet_conv2D_1",
                      border_mode="same")(unet_input)
    ]
    for i, f in enumerate(list_nb_filters[1:]):
        name = "unet_conv2D_%s" % (i + 2)
        conv = conv_block_unet(list_encoder[-1], f, name, bn_mode, bn_axis)
        list_encoder.append(conv)

    # Prepare decoder filters
    list_nb_filters = list_nb_filters[:-2][::-1]
    if len(list_nb_filters) < nb_conv - 1:
        list_nb_filters.append(nb_filters)

    # Decoder: each block upsamples and is paired with the mirrored
    # encoder feature map (classic U-Net skip connections).
    list_decoder = [
        up_conv_block_unet(list_encoder[-1],
                           list_encoder[-2],
                           list_nb_filters[0],
                           "unet_upconv2D_1",
                           bn_mode,
                           bn_axis,
                           dropout=True)
    ]
    for i, f in enumerate(list_nb_filters[1:]):
        name = "unet_upconv2D_%s" % (i + 2)
        # Dropout only on first few layers
        if i < 2:
            d = True
        else:
            d = False
        conv = up_conv_block_unet(list_decoder[-1],
                                  list_encoder[-(i + 3)],
                                  f,
                                  name,
                                  bn_mode,
                                  bn_axis,
                                  dropout=d)
        list_decoder.append(conv)

    x = Activation("relu")(list_decoder[-1])
    x = UpSampling2D(size=(2, 2))(x)
    xw_wid, yw_wid = _get_conv_size(x)
    # BUG FIX: the original passed xw_wid for both kernel dimensions,
    # ignoring the freshly computed yw_wid; use (xw_wid, yw_wid) as in
    # the first encoder convolution above.
    x = Convolution2D(nb_channels,
                      xw_wid,
                      yw_wid,
                      name="last_conv",
                      border_mode="same")(x)
    x = Activation("tanh")(x)

    generator_unet = Model(input=[unet_input], output=[x])

    return generator_unet
예제 #24
0
 def deconv2d(layer_input):
     """Double the spatial size of *layer_input*, then refine it with a
     3x3 same-padded convolution followed by a ReLU."""
     upsampled = UpSampling2D(size=2)(layer_input)
     refined = Conv2D(256, kernel_size=3, strides=1, padding='same')(upsampled)
     return Activation('relu')(refined)
예제 #25
0
else:
	print ("Building Model...")
	model = Sequential()

	model.add(Conv2D(48, (5, 5), padding='same', input_shape=x_train.shape[1:]))
	model.add(Activation("relu"))
	model.add(MaxPooling2D(pool_size=(2,2)))
	model.add(Conv2D(96, (5, 5), padding='same'))
	model.add(Activation("relu"))
	model.add(MaxPooling2D(pool_size=(2,2)))
	model.add(Conv2D(192, (5, 5), padding='same'))
	model.add(Activation("relu"))
	model.add(MaxPooling2D(pool_size=(2,2)))
	model.add(Conv2D(192, (5, 5), padding='same'))
	model.add(Activation("relu"))
	model.add(UpSampling2D(size=(2,2)))
	model.add(Conv2D(192, (5, 5), padding='same'))
	model.add(Activation("relu"))
	model.add(UpSampling2D(size=(2,2)))
	model.add(Conv2D(96, (5, 5), padding='same'))
	model.add(Activation("relu"))
	model.add(UpSampling2D(size=(2,2)))
	model.add(Conv2D(48, (5, 5), padding='same'))
	model.add(Activation("relu"))
	model.add(Conv2D(3, (1, 1), padding='same'))
	model.add(Activation("sigmoid"))

	model.compile(optimizer=Adam(lr=lr), loss='mse')
	plot_model(model, to_file='model.png', show_shapes=True)

###################################
예제 #26
0
    if float(2 * np.sqrt(np.array(history.losses[-1]))) < 1.6:
        lrate = 0.006
        momentum = 0.4
        decay_rate = 0.0
        return lrate
    else:
        lrate = 0.01
        return lrate


# Fully convolutional "generator" written against the legacy Keras 1 API
# (positional kernel sizes, border_mode=, init=, BatchNormalization(mode=2)).
generator = Sequential()
# Un-padded 3x3 conv on the externally defined input_shape.
generator.add(
    Convolution2D(20, 3, 3, border_mode='valid', input_shape=input_shape))
generator.add(BatchNormalization(mode=2))
generator.add(Activation('relu'))
# Upsample 2x, then two more un-padded 3x3 conv blocks.
generator.add(UpSampling2D(size=(2, 2)))
generator.add(Convolution2D(20, 3, 3, init='glorot_uniform'))
generator.add(BatchNormalization(mode=2))
generator.add(Activation('relu'))
generator.add(Convolution2D(20, 3, 3, init='glorot_uniform'))
generator.add(BatchNormalization(mode=2))
generator.add(Activation('relu'))
# Downsample 3x and project to 4 channels.
generator.add(MaxPooling2D(pool_size=(3, 3)))
generator.add(Convolution2D(4, 3, 3, init='glorot_uniform'))
generator.add(BatchNormalization(mode=2))
generator.add(Activation('relu'))
# NOTE(review): the preceding volume must contain exactly 28*28*1 elements
# for this Reshape to succeed — this depends on input_shape; confirm.
generator.add(Reshape((28, 28, 1)))
generator.compile(loss='binary_crossentropy', optimizer='adam')
generator.summary()

############# DISCRIMINATOR
예제 #27
0
# SGD hyper-parameters (learning_rate is defined elsewhere in the file).
decay_rate = 5e-5
momentum = 0.9

sgd = SGD(lr=learning_rate, momentum=momentum, decay=decay_rate, nesterov=True)
# NOTE(review): bare expression — has no effect outside a REPL/notebook;
# likely a leftover echo statement.
shape2

# Legacy Keras 1 API: Convolution2D(nb_filter, nb_row, nb_col),
# BatchNormalization(mode=2), init=.
recog0 = Sequential()
recog0.add(
    Convolution2D(20, 4, 4, border_mode='valid', input_shape=input_shape))
recog0.add(BatchNormalization(mode=2))
recog0.add(MaxPooling2D(pool_size=(2, 2)))

# NOTE(review): this is an alias, not a copy — the adds below also
# mutate recog0.
recog = recog0
recog.add(Activation('relu'))
recog.add(MaxPooling2D(pool_size=(2, 2)))
recog.add(UpSampling2D(size=(2, 2)))
recog.add(Convolution2D(20, 1, 1, init='glorot_uniform'))
recog.add(BatchNormalization(mode=2))
recog.add(Activation('relu'))

# Print the first few layer names for inspection.
for i in range(0, 8):
    print(i, recog0.layers[i].name)

# NOTE(review): also an alias of recog0 (and recog), not a snapshot.
recog_res = recog0
part = 8
# NOTE(review): bare attribute access — no effect outside a REPL.
recog0.layers[part].name
# Backend function mapping (input tensor, learning-phase flag) to the
# output of layer `part`.
get_0_layer_output = K.function(
    [recog0.layers[0].input, K.learning_phase()], [recog0.layers[part].output])

# Evaluate layer `part` on x_train in test phase (0); take the first
# sample's output.
get_0_layer_output([x_train, 0])[0][0]
예제 #28
0
# Legacy Keras 0.x Graph-API section: add a 3x3 conv node 'R41' on top of
# node 'BNN1' (graph and the referenced nodes are defined elsewhere).
graph.add_node(Convolution2D(64,
                             3,
                             3,
                             border_mode='same',
                             activation='linear',
                             init='glorot_uniform',
                             dim_ordering='tf'),
               name='R41',
               input='BNN1')
graph.add_node(Activation('relu'), name='AR41', input='R41')
graph.add_node(BatchNormalization(axis=-1), name='BNR41', input='AR41')

graph.add_node(Dropout(0.3), input='BNR41', name='RD3')

#################################################################
# Upsample 2x before the next decoder stage.
graph.add_node(UpSampling2D(size=(2, 2), dim_ordering='tf'),
               name='RU3',
               input='RD3')
##############################################################

# 'R32' consumes three nodes at once; with the Graph API's multi-input
# form these are merged before the convolution (presumably concatenated —
# confirm against the Keras 0.x Graph documentation).
graph.add_node(Convolution2D(64,
                             3,
                             3,
                             border_mode='same',
                             activation='linear',
                             init='glorot_uniform',
                             dim_ordering='tf'),
               name='R32',
               inputs=['RU3', 'LD3', 'ND2'])
graph.add_node(Activation('relu'), name='AR32', input='R32')
graph.add_node(BatchNormalization(axis=-1), name='BNR32', input='AR32')
예제 #29
0
    def build_generator(self):
        """Build the generator: latent vector -> image.

        The latent vector is reshaped to a 1x1 feature map with
        ``latent_dim`` channels, expanded to 4x4 by a kernel-4 transposed
        convolution, then doubled three times by ``UpSampling2D``
        (4 -> 8 -> 16 -> 32), with two Conv/BatchNorm/ReLU refinement
        blocks at each scale.  The output is a 1x1 convolution to
        ``self.channels`` channels with a linear activation.

        :return: a ``Model`` mapping (latent_dim,) noise to an image.
        """
        # (An earlier commented-out inception-block variant was removed
        # here; see version control history if it is ever needed.)
        model = Sequential()

        # Treat the latent vector as a 1x1 "image" with latent_dim channels.
        model.add(Reshape((1, 1, self.latent_dim)))

        # 1x1 -> 4x4 via a transposed convolution with a 4x4 kernel.
        model.add(Conv2DTranspose(128, kernel_size=4))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))

        # 4x4 -> 8x8
        model.add(UpSampling2D())

        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))

        # 8x8 -> 16x16
        model.add(UpSampling2D())

        model.add(Conv2D(32, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(Conv2D(32, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))

        # 16x16 -> 32x32
        model.add(UpSampling2D())

        model.add(Conv2D(16, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(Conv2D(16, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))

        # Project to the output channels; linear (unsquashed) activation.
        model.add(Conv2D(self.channels, kernel_size=1, padding="same"))
        model.add(Activation("linear"))

        noise = Input(shape=(self.latent_dim, ))
        img = model(noise)

        model.summary()

        return Model(noise, img)
x = Input(shape=(96, 96, 3))

# Encoder: four conv/pool stages (64 -> 32 -> 32 -> 16 filters), each
# halving the spatial resolution of the 96x96x3 input.
h = x
for n_filters in (64, 32, 32, 16):
    h = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(h)
    h = MaxPooling2D((2, 2), padding='same')(h)

# Decoder: mirror of the encoder — four conv/upsample stages
# (16 -> 32 -> 32 -> 64 filters) restoring the original resolution.
d = h
for n_filters in (16, 32, 32, 64):
    d = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(d)
    d = UpSampling2D((2, 2))(d)

# Reconstruction head: map back to 3 channels in [0, 1].
r = Conv2D(3, (3, 3), activation='sigmoid', padding='same')(d)

autoencoder = Model(inputs=x, outputs=r)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
autoencoder.summary()

# plot_model(autoencoder, to_file='model.png')