Example #1
0
# NOTE(review): fragment of an FCN-8s-style segmentation head on a VGG-like
# backbone.  `c4`, `f3`, `inputs`, `n_classes`, `crop` and `mean_iou` are
# defined in code outside this excerpt — verify against the full file.
# Pool block-4 features; `f4` is kept as the stride-16 skip source.
p4=MaxPooling2D((2,2), strides=(2,2), name='block4_pool', data_format='channels_last')(c4)
f4=p4
    
# Block 5: three 3x3 ReLU convolutions then 2x2 pooling (stride-32 features).
c5=Conv2D(512, (3,3), activation='relu', padding='same', name='block5_conv1', data_format='channels_last')(p4)
c5=Conv2D(512, (3,3), activation='relu', padding='same', name='block5_conv2', data_format='channels_last')(c5)
c5=Conv2D(512, (3,3), activation='relu', padding='same', name='block5_conv3', data_format='channels_last')(c5)
p5=MaxPooling2D((2,2), strides=(2,2), name='block5_pool', data_format='channels_last')(c5)
f5=p5
    
# Fully-convolutional "dense" head: 7x7 and 1x1 convs replace FC layers,
# then a 1x1 conv produces per-class score maps.
o=f5
o=(Conv2D(4096, (7,7), activation='relu', padding='same', data_format='channels_last'))(o)
o=Dropout(0.5)(o)
o=(Conv2D(4096, (1,1), activation='relu', padding='same', data_format='channels_last'))(o)
o=Dropout(0.5)(o)
o=(Conv2D(n_classes, (1,1), kernel_initializer='he_normal', data_format='channels_last'))(o)
# 2x learned upsampling of the class scores.
o=(Conv2DTranspose(n_classes, kernel_size=(4,4), strides=(2,2), use_bias=False, data_format='channels_last'))(o)
    
# Fuse with the 1x1-projected stride-16 skip; `crop` presumably aligns the
# two tensors' spatial sizes — confirm its definition.
o2=f4
o2=(Conv2D(n_classes, (1,1), kernel_initializer='he_normal', data_format='channels_last'))(o2)
o,o2=crop(o,o2,inputs)
o=layers.Add()([o,o2])
o=(Conv2DTranspose(n_classes, kernel_size=(4,4), strides=(2,2), use_bias=False, data_format='channels_last'))(o)
    
# Fuse with the stride-8 skip, then upsample 8x back toward input resolution.
o2=f3
o2=(Conv2D(n_classes, (1,1), kernel_initializer='he_normal', data_format='channels_last'))(o2)
o2,o=crop(o2,o,inputs)
o=layers.Add()([o2,o])
o=(Conv2DTranspose(n_classes, kernel_size=(16,16), strides=(8,8), use_bias=False, padding='same', data_format='channels_last'))(o)

# NOTE(review): the final transpose conv has no activation but the loss is
# binary_crossentropy — confirm whether a sigmoid/softmax was intended.
model=Model(inputs,o)
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=[mean_iou])
def Unet(img_size):
    """U-Net variant with dual max/avg pooling and gated refinement skips.

    Behaviour-identical rewrite of the original builder: a five-level
    encoder (16 -> 256 filters), decoder levels 7 and 8 receive extra
    side branches built from `GatedUnit` / `GatedRefinementUnit`, and
    the output is a single sigmoid channel.  `GatedUnit`,
    `GatedRefinementUnit` and the Keras layer classes are expected to
    be in scope at module level.
    """

    def _conv(filters, tensor):
        # Standard 3x3 ELU conv used everywhere except encoder level 1.
        return Conv2D(filters, (3, 3), activation='elu',
                      kernel_initializer='he_normal', padding='same')(tensor)

    def _dual_pool(tensor):
        # Channel-concatenated 2x2 max- and average-pooling.
        return concatenate([MaxPooling2D((2, 2))(tensor),
                            AveragePooling2D((2, 2))(tensor)])

    img_in = Input((img_size, img_size, 3))
    scaled = Lambda(lambda x: x / 255)(img_in)

    # Encoder level 1: conv -> BN -> ELU three times, dropout after the
    # first two stages (this level alone uses batch normalisation).
    e1 = Conv2D(16, (3, 3), kernel_initializer='he_normal', padding='same')(scaled)
    e1 = Activation('elu')(BatchNormalization(axis=3)(e1))
    e1 = Dropout(0.1)(e1)
    e1 = Conv2D(16, (3, 3), kernel_initializer='he_normal', padding='same')(e1)
    e1 = Activation('elu')(BatchNormalization(axis=3)(e1))
    e1 = Dropout(0.1)(e1)
    e1 = Conv2D(16, (3, 3), kernel_initializer='he_normal', padding='same')(e1)
    e1 = Activation('elu')(BatchNormalization(axis=3)(e1))
    pool1 = _dual_pool(e1)

    # Encoder levels 2-4: conv/dropout/conv/dropout/conv, then dual pool.
    e2 = _conv(32, pool1)
    e2 = Dropout(0.1)(e2)
    e2 = _conv(32, e2)
    e2 = Dropout(0.1)(e2)
    e2 = _conv(32, e2)
    pool2 = _dual_pool(e2)

    e3 = _conv(64, pool2)
    e3 = Dropout(0.2)(e3)
    e3 = _conv(64, e3)
    e3 = Dropout(0.2)(e3)
    e3 = _conv(64, e3)
    pool3 = _dual_pool(e3)

    e4 = _conv(128, pool3)
    e4 = Dropout(0.2)(e4)
    e4 = _conv(128, e4)
    e4 = Dropout(0.2)(e4)
    e4 = _conv(128, e4)
    pool4 = _dual_pool(e4)

    # Bottleneck.
    e5 = _conv(256, pool4)
    e5 = Dropout(0.3)(e5)
    e5 = _conv(256, e5)

    # Decoder level 6: upsample + plain skip from level 4.
    up6 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(e5)
    d6 = _conv(128, concatenate([up6, e4]))
    d6 = Dropout(0.2)(d6)
    d6 = _conv(128, d6)

    # Decoder level 7: gated refinement branch plus skip from level 3.
    gate7 = GatedUnit(pool3, pool4)
    refine7 = GatedRefinementUnit(gate7, d6)
    up7 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(d6)
    d7 = _conv(64, concatenate([up7, e3, refine7]))
    d7 = Dropout(0.2)(d7)
    d7 = _conv(64, d7)

    # Decoder level 8: gated refinement branch plus skip from level 2.
    gate8 = GatedUnit(pool2, pool3)
    refine8 = GatedRefinementUnit(gate8, d7)
    up8 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(d7)
    d8 = _conv(32, concatenate([up8, e2, refine8]))
    d8 = Dropout(0.1)(d8)
    d8 = _conv(32, d8)

    # Decoder level 9: upsample + skip from level 1.
    up9 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same')(d8)
    d9 = _conv(16, concatenate([up9, e1], axis=3))
    d9 = Dropout(0.1)(d9)
    d9 = _conv(16, d9)

    out = Conv2D(1, (1, 1), activation='sigmoid')(d9)
    return Model(inputs=[img_in], outputs=[out])
Example #3
0
# NOTE(review): excerpt of a small U-Net; `p1` (and the input layer) are
# defined above this fragment and the decoder continues after `u9` below it.
# Encoder: double 3x3 ReLU convs with 2x2 pooling between levels.
c2 = Conv2D(16, (3, 3), activation='relu', padding='same')(p1)
c2 = Conv2D(16, (3, 3), activation='relu', padding='same')(c2)
p2 = MaxPooling2D((2, 2))(c2)

c3 = Conv2D(32, (3, 3), activation='relu', padding='same')(p2)
c3 = Conv2D(32, (3, 3), activation='relu', padding='same')(c3)
p3 = MaxPooling2D((2, 2))(c3)

c4 = Conv2D(64, (3, 3), activation='relu', padding='same')(p3)
c4 = Conv2D(64, (3, 3), activation='relu', padding='same')(c4)
p4 = MaxPooling2D(pool_size=(2, 2))(c4)

# Bottleneck.
c5 = Conv2D(128, (3, 3), activation='relu', padding='same')(p4)
c5 = Conv2D(128, (3, 3), activation='relu', padding='same')(c5)

# Decoder: stride-2 transposed convs with skip concatenations.
u6 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(c5)
u6 = concatenate([u6, c4])
c6 = Conv2D(64, (3, 3), activation='relu', padding='same')(u6)
c6 = Conv2D(64, (3, 3), activation='relu', padding='same')(c6)

u7 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(c6)
u7 = concatenate([u7, c3])
c7 = Conv2D(32, (3, 3), activation='relu', padding='same')(u7)
c7 = Conv2D(32, (3, 3), activation='relu', padding='same')(c7)

u8 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same')(c7)
u8 = concatenate([u8, c2])
c8 = Conv2D(16, (3, 3), activation='relu', padding='same')(u8)
c8 = Conv2D(16, (3, 3), activation='relu', padding='same')(c8)

u9 = Conv2DTranspose(8, (2, 2), strides=(2, 2), padding='same')(c8)
Example #4
0
def get_unet_model(IMG_HEIGHT=304, IMG_WIDTH=304, IMG_CHANNELS=3):
    """Build and compile a 4-level U-Net for binary segmentation.

    The encoder halves the spatial resolution four times (16 -> 32 ->
    64 -> 128 filters), a 256-filter bottleneck follows, and the
    decoder mirrors the encoder with stride-2 transposed convolutions
    and skip concatenations.  The output is a single sigmoid channel;
    the model is compiled with Adam + binary cross-entropy and the
    project's `mean_iou` metric (defined elsewhere in this module).

    Args:
        IMG_HEIGHT, IMG_WIDTH: input size; must be multiples of 16 so
            the four pool/upsample stages round-trip exactly.  (The
            former default of 300 could never build: 300 -> 150 -> 75
            -> 37 under pooling, and upsampling 37 -> 74 cannot be
            concatenated with the 75-wide skip tensor.)
        IMG_CHANNELS: number of input channels.

    Returns:
        A compiled keras Model.

    Raises:
        ValueError: if IMG_HEIGHT or IMG_WIDTH is not a multiple of 16.
    """
    # Fail fast with a clear message instead of an opaque shape error
    # raised deep inside a concatenate() call.
    if IMG_HEIGHT % 16 != 0 or IMG_WIDTH % 16 != 0:
        raise ValueError(
            'IMG_HEIGHT and IMG_WIDTH must be multiples of 16, '
            'got %dx%d' % (IMG_HEIGHT, IMG_WIDTH))

    def _double_conv(t, filters, drop):
        # Two 3x3 ELU convolutions with dropout in between.
        t = Conv2D(filters, (3, 3), activation='elu',
                   kernel_initializer='he_normal', padding='same')(t)
        t = Dropout(drop)(t)
        t = Conv2D(filters, (3, 3), activation='elu',
                   kernel_initializer='he_normal', padding='same')(t)
        return t

    inputs = layers.Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
    # NOTE(review): x / 1 is a no-op kept for behaviour compatibility —
    # possibly a leftover of an x / 255 normalisation; confirm upstream.
    s = Lambda(lambda x: x / 1)(inputs)

    # Encoder.
    c1 = _double_conv(s, 16, 0.1)
    p1 = MaxPooling2D((2, 2))(c1)
    c2 = _double_conv(p1, 32, 0.1)
    p2 = MaxPooling2D((2, 2))(c2)
    c3 = _double_conv(p2, 64, 0.2)
    p3 = MaxPooling2D((2, 2))(c3)
    c4 = _double_conv(p3, 128, 0.2)
    p4 = MaxPooling2D(pool_size=(2, 2))(c4)

    # Bottleneck.
    c5 = _double_conv(p4, 256, 0.3)

    # Decoder with skip connections.
    u6 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(c5)
    c6 = _double_conv(concatenate([u6, c4]), 128, 0.2)
    u7 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(c6)
    c7 = _double_conv(concatenate([u7, c3]), 64, 0.2)
    u8 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(c7)
    c8 = _double_conv(concatenate([u8, c2]), 32, 0.1)
    u9 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same')(c8)
    c9 = _double_conv(concatenate([u9, c1], axis=3), 16, 0.1)

    outputs = Conv2D(1, (1, 1), activation='sigmoid')(c9)

    model = Model(inputs=[inputs], outputs=[outputs])
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=[mean_iou])
    model.summary()
    return model
Example #5
0
    def build(self,
              n_depth_layers,
              n_init_filters,
              IMG_HEIGHT=256,
              IMG_WIDTH=256,
              IMG_CHANNELS=3,
              verbose=1,
              initializer=glorot_normal,
              x_max=1.,
              dropouts_frac=None):
        """Build a parameterised U-Net and store it on ``self.model``.

        The encoder doubles the filter count at each of the
        ``n_depth_layers`` levels (starting from ``n_init_filters``);
        the decoder mirrors it, concatenating each level's pre-pool
        activation (the "2c" conv) as a skip connection.  Inputs are
        scaled by ``1 / x_max``; the output is a single sigmoid channel.

        NOTE(review): ``dropouts_frac`` is accepted but never used —
        confirm whether dropout layers were intended.
        NOTE(review): there is no cropping/padding, so IMG_HEIGHT and
        IMG_WIDTH presumably must be divisible by 2**n_depth_layers for
        the skip concatenations to line up — verify with callers.
        """
        inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), name="l0_input")
        s = Lambda(lambda x: x / x_max, name="l0_normalize")(inputs)
        tmp = s

        # encoder layers:
        # Each level stores its tensors in encoder_layers[level] under the
        # keys "1c"/"2c"/"3p"; "2c" is reused by the decoder skips below.
        encoder_layers = dict()
        n_filters = n_init_filters
        for i in range(n_depth_layers):
            #             print(n_filters)
            encoder_layers[i + 1] = dict()
            tmp = encoder_layers[i + 1]["1c"] = Conv2D(
                n_filters, (3, 3),
                activation='relu',
                padding='same',
                name="enc_l%d_1c" % (i + 1),
                kernel_initializer=initializer())(tmp)
            tmp = encoder_layers[i + 1]["2c"] = Conv2D(
                n_filters, (3, 3),
                activation='relu',
                padding='same',
                name="enc_l%d_2c" % (i + 1),
                kernel_initializer=initializer())(tmp)
            tmp = encoder_layers[i + 1]["3p"] = MaxPooling2D(
                (2, 2), name="enc_l%d_3p" % (i + 1))(tmp)
            n_filters = 2 * n_filters
            # NOTE(review): `encoder` is assigned but never read afterwards.
            encoder = tmp

        # central layers:
        central_convs = dict()
        #         print(n_filters)
        tmp = central_convs[1] = Conv2D(n_filters, (3, 3),
                                        activation='relu',
                                        padding='same',
                                        name="mid_1conv",
                                        kernel_initializer=initializer())(tmp)
        tmp = central_convs[2] = Conv2D(n_filters, (3, 3),
                                        activation='relu',
                                        padding='same',
                                        name="mid_2conv",
                                        kernel_initializer=initializer())(tmp)

        # # decoder layers:
        # Decoder level i+1 upsamples and concatenates the mirrored encoder
        # level's pre-pool conv: encoder_layers[n_depth_layers - i]["2c"].
        decoder_layers = dict()
        for i in range(n_depth_layers):
            n_filters = n_filters // 2
            #             print(n_filters)
            decoder_layers[i + 1] = dict()
            tmp = decoder_layers[i + 1]["1u"] = Conv2DTranspose(
                n_filters, (2, 2),
                strides=(2, 2),
                padding='same',
                name="dec_l%d_1u" % (i + 1),
                kernel_initializer=initializer())(tmp)
            tmp = decoder_layers[i + 1]["2concat"] = concatenate(
                [tmp, encoder_layers[n_depth_layers - (i)]["2c"]],
                name="dec_l%d_2concat" % (i + 1))
            tmp = decoder_layers[i + 1]["3c"] = Conv2D(
                n_filters, (3, 3),
                activation='relu',
                padding='same',
                name="dec_l%d_3c" % (i + 1),
                kernel_initializer=initializer())(tmp)
            tmp = decoder_layers[i + 1]["4c"] = Conv2D(
                n_filters, (3, 3),
                activation='relu',
                padding='same',
                name="dec_l%d_4c" % (i + 1),
                kernel_initializer=initializer())(tmp)

        # Single-channel sigmoid output for binary segmentation.
        outputs = Conv2D(1, (1, 1), activation='sigmoid')(tmp)

        model = Model(inputs=[inputs], outputs=[outputs])
        if verbose > 0:
            model.summary()

        self.model = model
        return self.model
Example #6
0
def segnet_transposed(nClasses,
                      optimizer=None,
                      input_height=360,
                      input_width=480):
    """SegNet-style encoder/decoder built with transposed convolutions.

    Args:
        nClasses: number of output classes (per-pixel softmax).
        optimizer: currently unused — the compile call is left commented
            out below, so compilation is the caller's responsibility.
        input_height, input_width: input image size (3-channel input).

    Returns:
        An uncompiled keras Model whose output has shape
        (outputHeight * outputWidth, nClasses) with a softmax over
        classes; the decoder's spatial size is attached to the model as
        ``outputHeight`` / ``outputWidth`` attributes.
    """
    kernel = 3
    filter_size = 64
    pad = 1
    pool_size = 2

    img_input = Input(shape=(input_height, input_width, 3))

    # encoder: pad -> conv(valid) -> BN -> ReLU per stage, with 2x2
    # pooling between stages.  l1..l4 are kept only to feed the
    # commented-out Add() skip connections in the decoder.
    x = ZeroPadding2D(padding=(pad, pad))(img_input)
    x = Convolution2D(filter_size, (kernel, kernel), padding='valid')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    l1 = x
    x = MaxPooling2D(pool_size=(pool_size, pool_size))(x)

    x = ZeroPadding2D(padding=(pad, pad))(x)
    x = Convolution2D(128, (kernel, kernel), padding='valid')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    l2 = x
    x = MaxPooling2D(pool_size=(pool_size, pool_size))(x)

    x = ZeroPadding2D(padding=(pad, pad))(x)
    x = Convolution2D(256, (kernel, kernel), padding='valid')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    l3 = x
    x = MaxPooling2D(pool_size=(pool_size, pool_size))(x)

    x = ZeroPadding2D(padding=(pad, pad))(x)
    x = Convolution2D(512, (kernel, kernel), padding='valid')(x)
    x = BatchNormalization()(x)
    l4 = x
    x = Activation('relu')(x)

    # decoder: mirrored pad -> transposed conv(valid) -> BN stages with
    # 2x2 upsampling between them.
    x = ZeroPadding2D(padding=(pad, pad))(x)
    x = Conv2DTranspose(512, (kernel, kernel), padding='valid')(x)
    x = BatchNormalization()(x)

    #    x = Add()([l4, x])
    x = UpSampling2D(size=(pool_size, pool_size))(x)
    x = ZeroPadding2D(padding=(pad, pad))(x)
    x = Conv2DTranspose(256, (kernel, kernel), padding='valid')(x)
    x = BatchNormalization()(x)

    #   x = Add()([l3, x])
    x = UpSampling2D(size=(pool_size, pool_size))(x)
    x = ZeroPadding2D(padding=(pad, pad))(x)
    x = Conv2DTranspose(128, (kernel, kernel), padding='valid')(x)
    x = BatchNormalization()(x)

    x = UpSampling2D(size=(pool_size, pool_size))(x)
    x = ZeroPadding2D(padding=(pad, pad))(x)
    x = Conv2DTranspose(filter_size, (kernel, kernel), padding='valid')(x)
    x = BatchNormalization()(x)

    # x = Add()([l1, x])
    # 1x1 transposed conv produces the per-class score maps.
    x = Conv2DTranspose(nClasses, (1, 1), padding='valid')(x)

    out = x
    # Throwaway model used only to query the decoder's output spatial
    # size, which depends on the valid-padding convolutions above.
    a = Model(inputs=img_input, outputs=out)

    a.outputHeight = a.output_shape[1]
    a.outputWidth = a.output_shape[2]

    out = Reshape((a.outputHeight * a.outputWidth, nClasses),
                  input_shape=(nClasses, a.outputHeight, a.outputWidth))(out)
    out = Activation('softmax')(out)
    #    if not optimizer is None:
    #        model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=['accuracy'])
    model = Model(inputs=img_input, outputs=out)
    model.outputHeight = a.outputHeight
    model.outputWidth = a.outputWidth

    return model
Example #7
0
# Data prep: MNIST-shaped arrays as 28x28x1 floats scaled to [0, 1].
X_train = X_train.reshape(60000, 28, 28, 1)
X_test = X_test.reshape(10000, 28, 28, 1)
X_train = X_train.astype('float32')/255
X_test = X_test.astype('float32')/255

# Dimensionality of the generator's input noise vector.
z_dim = 100


# NOTE(review): `lr=` is the legacy Keras argument name (newer versions use
# `learning_rate=`); left as-is to match the installed version.
adam = Adam(lr=0.0002, beta_1=0.5)

# Generator: project noise to a 7x7x112 map, then two stride-2 transposed
# convolutions up to 28x28x1 with a sigmoid output (inputs are in [0, 1]).
g = Sequential()
g.add(Dense(7*7*112, input_dim=z_dim))
g.add(Reshape((7, 7, 112)))
g.add(BatchNormalization())
g.add(LeakyReLU(alpha=0.2))
g.add(Conv2DTranspose(56, 5, strides=2, padding='same'))
g.add(BatchNormalization())
g.add(LeakyReLU(alpha=0.2))
g.add(Conv2DTranspose(1, 5, strides=2, padding='same', activation='sigmoid'))
g.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
#g.summary()

# Discriminator: three stride-2 convolutions down from 28x28.
# NOTE(review): fragment ends mid-definition — the Dense classification head
# after Flatten() is outside this excerpt.
d = Sequential()
d.add(Conv2D(56, 5, strides=2, padding='same', input_shape=(28, 28, 1)))
d.add(LeakyReLU(alpha=0.2))
d.add(Conv2D(112, 5, strides=2, padding='same'))
d.add(BatchNormalization())
d.add(LeakyReLU(alpha=0.2))
d.add(Conv2D(224, 5, strides=2, padding='same'))
d.add(LeakyReLU(alpha=0.2))
d.add(Flatten())
Example #8
0
def build_vae(n_row,
              n_col,
              n_chn,
              output_size,
              alpha=1,
              lr=1e-3,
              leaky_relu_alpha=0.2):
    """Build a convolutional VAE; return ``(vae, encoder, generator)``.

    The encoder downsamples twice with stride-2 convolutions, and the
    decoder layers are *shared* between the full VAE (fed by the
    sampled latent ``z``) and a standalone generator model (fed by a
    fresh latent Input) — so the generator reuses the VAE's trained
    weights.

    NOTE(review): ``n_filters``, ``KLDivergenceLossLayer``, ``sample_z``
    and ``neg_log_ll`` are module-level names defined outside this block.
    NOTE(review): ``x._keras_shape`` exists only in older standalone
    Keras; newer versions would need ``K.int_shape(x)`` — confirm the
    target Keras version.
    """
    opt = Adam(lr=lr)
    # encoder
    vae_input = Input(shape=(n_row, n_col, n_chn))
    x = Conv2D(n_filters[0], (3, 3), strides=2, padding='same')(vae_input)
    x = BatchNormalization(axis=-1)(x)
    x = LeakyReLU(alpha=leaky_relu_alpha)(x)
    x = Conv2D(n_filters[1], (3, 3), strides=2, padding='same')(x)
    x = BatchNormalization(axis=-1)(x)
    x = LeakyReLU(alpha=leaky_relu_alpha)(x)
    # Remember the conv feature-map shape so the decoder can un-flatten.
    shape_before_flatten = x._keras_shape[1:]
    x = Flatten()(x)
    x = Dense(1024, activation='relu')(x)
    z_mean = Dense(output_size, activation='linear',
                   name='z_mean')(x)  # mean of z
    z_log_var = Dense(output_size, activation='linear',
                      name='z_log_var')(x)  # log variance of z
    z_mean, z_log_var = KLDivergenceLossLayer(name='kl_divergence_loss')(
        [z_mean, z_log_var])  # add KL loss
    z = Lambda(sample_z, output_shape=(output_size, ),
               name='z')([z_mean, z_log_var])  # reparametrization
    # The encoder model outputs the deterministic z_mean, not a sample.
    encoder = Model(vae_input, z_mean)

    # define decoder/generator layers
    decoder_hidden = Dense(1024, activation='relu')
    decoder_expand = Dense(np.prod(shape_before_flatten), activation='relu')
    decoder_reshape = Reshape(shape_before_flatten)
    decoder_deconv_1 = Conv2DTranspose(n_filters[1], (3, 3),
                                       strides=2,
                                       padding='same')
    decoder_bn_1 = BatchNormalization(axis=-1)
    decoder_actv_1 = Activation('relu')
    decoder_deconv_2 = Conv2DTranspose(n_filters[0], (3, 3),
                                       strides=2,
                                       padding='same')
    decoder_bn_2 = BatchNormalization(axis=-1)
    decoder_actv_2 = Activation('relu')
    decoder_deconv_3 = Conv2DTranspose(
        n_chn, (3, 3), strides=1, padding='same',
        activation='sigmoid')  # output in [0, 1]

    # decoder
    x = decoder_hidden(z)
    x = decoder_expand(x)
    x = decoder_reshape(x)
    x = decoder_deconv_1(x)
    x = decoder_bn_1(x)
    x = decoder_actv_1(x)
    x = decoder_deconv_2(x)
    x = decoder_bn_2(x)
    x = decoder_actv_2(x)
    decoded_output = decoder_deconv_3(x)

    # VAE model
    vae = Model(vae_input, decoded_output)
    vae.compile(optimizer=opt,
                loss=neg_log_ll(n_row, n_col, n_chn, alpha=alpha)
                )  # add negative log-likelihood of Gaussian distribution
    vae.summary()

    # generator: same decoder layers applied to a standalone latent input.
    gen_input = Input(shape=(output_size, ))
    x = decoder_hidden(gen_input)
    x = decoder_expand(x)
    x = decoder_reshape(x)
    x = decoder_deconv_1(x)
    x = decoder_bn_1(x)
    x = decoder_actv_1(x)
    x = decoder_deconv_2(x)
    x = decoder_bn_2(x)
    x = decoder_actv_2(x)
    gen_output = decoder_deconv_3(x)
    generator = Model(gen_input, gen_output)

    return vae, encoder, generator
            kernel_initializer='he_normal',
            padding='same')(p1)
# NOTE(review): excerpt — the tensor feeding this first conv comes from a
# truncated statement above this fragment; `inputs`, `X_train`, `Y_train`
# and `mean_iou` are also defined outside it.
c2 = Conv2D(32, (3, 3),
            activation='relu',
            kernel_initializer='he_normal',
            padding='same')(c2)
p2 = MaxPooling2D((2, 2))(c2)

c3 = Conv2D(64, (3, 3),
            activation='relu',
            kernel_initializer='he_normal',
            padding='same')(p2)
c3 = Conv2D(64, (3, 3),
            activation='relu',
            kernel_initializer='he_normal',
            padding='same')(c3)
p3 = MaxPooling2D((2, 2))(c3)

# Single stride-8 transposed conv jumps straight back to input resolution
# (no skip connections), then a 1x1 sigmoid conv gives the binary mask.
c4 = Conv2DTranspose(16, (2, 2), strides=(8, 8), padding='same')(p3)
outputs = Conv2D(1, (1, 1), activation='sigmoid')(c4)

model = Model(inputs=[inputs], outputs=[outputs])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[mean_iou])
model.summary()

results = model.fit(X_train,
                    Y_train,
                    validation_split=0.2,
                    batch_size=16,
                    epochs=100)
Example #10
0
def get_generator():
    """Build the generator: 3D-conv video encoder -> 1D transposed-conv
    audio-style decoder.

    Returns a Model mapping a video tensor of shape ``hp.video_shape``
    to a pair ``(generated_sequence, input_copy)``, where the sequence
    has shape (16384, hp.c) in [-1, 1] (tanh output) and ``input_copy``
    is the unchanged input passed through an identity Lambda.

    NOTE(review): the main path is built from ``model_input`` directly,
    so ``input_copied`` only serves as the second model output.
    NOTE(review): the 2nd and 6th Conv3D blocks have no Activation
    before their MaxPooling3D — confirm whether ReLUs were omitted
    intentionally.
    """
    model_input = Input(shape=hp.video_shape)
    input_copied = Lambda(lambda x: x,
                          input_shape=model_input.shape[1:])(model_input)

    # Change below here
    # Video encoder: stacked valid-padding 3D convolutions with two
    # stride-1 max-pools, then a Dense bottleneck.
    model = Conv3D(filters=16,
                   kernel_size=3,
                   strides=1,
                   padding='valid',
                   data_format='channels_last',
                   kernel_regularizer=l2(regularization_penalty))(model_input)
    model = Activation('relu')(model)
    model = Conv3D(filters=16,
                   kernel_size=3,
                   strides=1,
                   padding='valid',
                   data_format='channels_last',
                   kernel_regularizer=l2(regularization_penalty))(model)
    model = MaxPooling3D(pool_size=(2, 2, 2),
                         strides=1,
                         padding='valid',
                         data_format='channels_last')(model)
    model = Conv3D(filters=32,
                   kernel_size=3,
                   strides=1,
                   padding='valid',
                   data_format='channels_last',
                   kernel_regularizer=l2(regularization_penalty))(model)
    model = Activation('relu')(model)
    model = Conv3D(filters=32,
                   kernel_size=(1, 3, 3),
                   strides=1,
                   padding='valid',
                   data_format='channels_last',
                   kernel_regularizer=l2(regularization_penalty))(model)
    model = Activation('relu')(model)
    model = Conv3D(filters=64,
                   kernel_size=(1, 3, 3),
                   strides=1,
                   padding='valid',
                   data_format='channels_last',
                   kernel_regularizer=l2(regularization_penalty))(model)
    model = Activation('relu')(model)
    model = Conv3D(filters=64,
                   kernel_size=(1, 3, 3),
                   strides=1,
                   padding='valid',
                   data_format='channels_last',
                   kernel_regularizer=l2(regularization_penalty))(model)
    model = MaxPooling3D(pool_size=(2, 2, 2),
                         strides=1,
                         padding='valid',
                         data_format='channels_last')(model)
    model = Flatten()(model)
    model = Dense(1024, activation='relu')(model)

    # Change above here

    model = Dense(units=256 * hp.d)(model)
    # Add layers here to connect video_size to the 100 units
    # Reshape 256*d units to a (1, 16, 16*d) map (1*16*16*d == 256*d).
    model = Reshape((1, 16, 16 * hp.d), input_shape=(256 * hp.d, ))(model)
    model = Activation('relu')(model)
    # Decoder: five (1, 25) transposed convs, each widening by 4x along
    # the second axis — 16 * 4**5 == 16384 output steps.
    model = Conv2DTranspose(
        8 * hp.d, (1, 25),
        strides=(1, 4),
        padding="same",
        data_format='channels_last',
        kernel_regularizer=l2(regularization_penalty))(model)
    model = Activation('relu')(model)
    model = Conv2DTranspose(
        4 * hp.d, (1, 25),
        strides=(1, 4),
        padding="same",
        data_format='channels_last',
        kernel_regularizer=l2(regularization_penalty))(model)
    model = Activation('relu')(model)
    model = Conv2DTranspose(
        2 * hp.d, (1, 25),
        strides=(1, 4),
        padding="same",
        data_format='channels_last',
        kernel_regularizer=l2(regularization_penalty))(model)
    model = Activation('relu')(model)
    model = Conv2DTranspose(
        hp.d, (1, 25),
        strides=(1, 4),
        padding="same",
        data_format='channels_last',
        kernel_regularizer=l2(regularization_penalty))(model)
    model = Activation('relu')(model)
    model = Conv2DTranspose(
        hp.c, (1, 25),
        strides=(1, 4),
        padding="same",
        data_format='channels_last',
        kernel_regularizer=l2(regularization_penalty))(model)
    model = Activation('tanh')(model)
    # Drop the singleton spatial axis: (1, 16384, c) -> (16384, c).
    model = Reshape((16384, hp.c), input_shape=(1, 16384, hp.c))(model)

    return Model(inputs=model_input, outputs=(model, input_copied))
Example #11
0
def make_generator(dense=True):
    """Generator mapping a 100-dimensional noise vector to a 28x28x1 image.

    Architecturally identical to the original: an optional Dense(1024)
    head, a projection to 128*7*7 units with BN + LeakyReLU, a reshape
    to a 7x7 feature map (channel axis per backend), two stride-2
    deconv upsamples (7 -> 14 -> 28) each refined by a 5x5 conv, and a
    final tanh conv so outputs lie in [-1, 1] like the normalised
    training data.
    """
    g = Sequential()

    # Head: either Dense(1024) -> LeakyReLU -> Dense(128*7*7), or the
    # single projection straight from the 100-d noise vector.
    if dense:
        g.add(Dense(1024, input_dim=100))
        g.add(LeakyReLU())
        g.add(Dense(128 * 7 * 7))
    else:
        g.add(Dense(128 * 7 * 7, input_dim=100))

    g.add(BatchNormalization())
    g.add(LeakyReLU())

    # Reshape to a 7x7 map with 128 channels, honouring the backend's
    # data format, and pick the matching BatchNorm axis.
    channels_first = K.image_data_format() == 'channels_first'
    if channels_first:
        g.add(Reshape((128, 7, 7), input_shape=(128 * 7 * 7, )))
    else:
        g.add(Reshape((7, 7, 128), input_shape=(128 * 7 * 7, )))
    bn_axis = 1 if channels_first else -1

    # 7x7 -> 14x14 upsample, then a refining convolution.
    g.add(Conv2DTranspose(filters=128,
                          kernel_size=(5, 5),
                          strides=2,
                          padding='same'))
    g.add(BatchNormalization(axis=bn_axis))
    g.add(LeakyReLU())
    g.add(Convolution2D(64, (5, 5), padding='same'))
    g.add(BatchNormalization(axis=bn_axis))
    g.add(LeakyReLU())

    # 14x14 -> 28x28 upsample, then the tanh output convolution.
    g.add(Conv2DTranspose(64, (5, 5), strides=2, padding='same'))
    g.add(BatchNormalization(axis=bn_axis))
    g.add(LeakyReLU())
    g.add(Convolution2D(1, (5, 5), padding='same', activation='tanh'))

    return g
Example #12
0
    def build_generator(self):
        """Build the DCGAN generator as a Model(noise -> image).

        Treats the latent vector (length ``self.latent_dim``) as a 1x1
        feature map, expands it to 3x3 with a valid transposed conv,
        then doubles the spatial size three times (3 -> 7 -> 14 -> 28
        by the transposed-conv arithmetic), refining after each
        upsample with 3x3 same-padding convolutions.  The output has
        ``self.channels`` channels with a tanh activation, i.e. values
        in [-1, 1].  (An earlier, abandoned layer stack that used to
        live here as commented-out code has been removed.)
        """
        model = Sequential()

        # Latent vector as a 1x1 "image" with latent_dim channels.
        model.add(Reshape((1, 1, self.latent_dim)))

        # 1x1 -> 3x3 (valid transposed conv, no padding).
        model.add(Conv2DTranspose(256, (3, 3)))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))

        model.add(Conv2D(128, (3, 3), padding='same'))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))

        model.add(Conv2D(128, (3, 3), padding='same'))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))

        # 3x3 -> 7x7 (stride-2 valid transposed conv: (3-1)*2 + 3 = 7).
        model.add(Conv2DTranspose(128, (3, 3), strides=(2, 2)))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))

        model.add(Conv2D(64, (3, 3), padding='same'))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))

        model.add(Conv2D(64, (3, 3), padding='same'))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))

        # 7x7 -> 14x14.
        model.add(Conv2DTranspose(64, (3, 3), strides=(2, 2), padding='same'))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))

        model.add(Conv2D(32, (3, 3), padding='same'))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))

        model.add(Conv2D(32, (3, 3), padding='same'))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))

        # 14x14 -> 28x28.
        model.add(Conv2DTranspose(16, (3, 3), strides=(2, 2), padding='same'))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))

        model.add(Conv2D(8, (3, 3), padding='same'))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))

        # Output image with tanh activation (values in [-1, 1]).
        model.add(Conv2D(self.channels, (3, 3), padding='same'))
        model.add(Activation('tanh'))

        noise = Input(shape=(self.latent_dim, ))
        img = model(noise)

        model.summary()

        return Model(noise, img)
def get_vgg_7conv(input_shape):
    """Build a 7-block VGG16-based U-Net producing a 1-channel sigmoid mask.

    Blocks 1-3 come from a pre-trained VGG16 (left trainable for fine-tuning);
    blocks 4-7 are newly added conv/pool stages.  The decoder upsamples with
    strided Conv2DTranspose layers and concatenates the matching encoder
    features at each resolution (U-Net skip connections).

    Args:
        input_shape: input image shape tuple, e.g. (height, width, 3).

    Returns:
        keras Model from the image input to the sigmoid segmentation map.
    """
    img_input = Input(input_shape)
    vgg16_base = VGG16(input_tensor=img_input, include_top=False)
    # Keep the whole backbone trainable so it is fine-tuned with the new layers.
    for l in vgg16_base.layers:
        l.trainable = True
    # Encoder feature maps reused below as skip connections.
    conv1 = vgg16_base.get_layer("block1_conv2").output
    conv2 = vgg16_base.get_layer("block2_conv2").output
    conv3 = vgg16_base.get_layer("block3_conv3").output
    pool3 = vgg16_base.get_layer("block3_pool").output

    # New contracting blocks 4-7 appended after the VGG16 stem.
    conv4 = Conv2D(384, (3, 3), activation="relu", padding='same', kernel_initializer="he_normal",
                   name="block4_conv1")(pool3)
    conv4 = Conv2D(384, (3, 3), activation="relu", padding='same', kernel_initializer="he_normal",
                   name="block4_conv2")(conv4)
    pool4 = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(conv4)

    conv5 = Conv2D(512, (3, 3), activation="relu", padding='same', kernel_initializer="he_normal",
                   name="block5_conv1")(pool4)
    conv5 = Conv2D(512, (3, 3), activation="relu", padding='same', kernel_initializer="he_normal",
                   name="block5_conv2")(conv5)
    pool5 = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(conv5)

    conv6 = Conv2D(512, (3, 3), activation="relu", padding='same', kernel_initializer="he_normal",
                   name="block6_conv1")(pool5)
    conv6 = Conv2D(512, (3, 3), activation="relu", padding='same', kernel_initializer="he_normal",
                   name="block6_conv2")(conv6)
    pool6 = MaxPooling2D((2, 2), strides=(2, 2), name='block6_pool')(conv6)

    conv7 = Conv2D(512, (3, 3), activation="relu", padding='same', kernel_initializer="he_normal",
                   name="block7_conv1")(pool6)
    conv7 = Conv2D(512, (3, 3), activation="relu", padding='same', kernel_initializer="he_normal",
                   name="block7_conv2")(conv7)

    # Decoder: each step doubles the spatial size and fuses the matching
    # encoder features along the channel axis.
    up8 = concatenate([Conv2DTranspose(384, (3, 3), activation="relu", kernel_initializer="he_normal",
                                       strides=(2, 2), padding='same')(conv7), conv6], axis=3)
    conv8 = Conv2D(384, (3, 3), activation="relu", kernel_initializer="he_normal", padding='same')(up8)

    up9 = concatenate([Conv2DTranspose(256, (3, 3), activation="relu", kernel_initializer="he_normal",
                                       strides=(2, 2), padding='same')(conv8), conv5], axis=3)
    conv9 = Conv2D(256, (3, 3), activation="relu", kernel_initializer="he_normal", padding='same')(up9)

    up10 = concatenate([Conv2DTranspose(192, (3, 3), activation="relu", kernel_initializer="he_normal",
                                        strides=(2, 2), padding='same')(conv9), conv4], axis=3)
    conv10 = Conv2D(192, (3, 3), activation="relu", kernel_initializer="he_normal", padding='same')(up10)

    up11 = concatenate([Conv2DTranspose(128, (3, 3), activation="relu", kernel_initializer="he_normal",
                                        strides=(2, 2), padding='same')(conv10), conv3], axis=3)
    conv11 = Conv2D(128, (3, 3), activation="relu", kernel_initializer="he_normal", padding='same')(up11)

    up12 = concatenate([Conv2DTranspose(64, (3, 3), activation="relu", kernel_initializer="he_normal",
                                        strides=(2, 2), padding='same')(conv11), conv2], axis=3)
    conv12 = Conv2D(64, (3, 3), activation="relu", kernel_initializer="he_normal", padding='same')(up12)

    up13 = concatenate([Conv2DTranspose(32, (3, 3), activation="relu", kernel_initializer="he_normal",
                                        strides=(2, 2), padding='same')(conv12), conv1], axis=3)
    conv13 = Conv2D(32, (3, 3), activation="relu", kernel_initializer="he_normal", padding='same')(up13)

    # 1x1 conv to a single logit channel, then sigmoid for a per-pixel mask.
    conv13 = Conv2D(1, (1, 1))(conv13)
    conv13 = Activation("sigmoid")(conv13)
    model = Model(img_input, conv13)
    return model
Example #14
0
    def build(self):
        """Build a U-Net-style encoder/decoder generator.

        Seven stride-2 conv blocks downsample the input by x128; six deconv
        blocks plus a final Conv2DTranspose upsample back by x128.  Skip
        connections are concatenated only when ``self.skip_connection`` is
        truthy.  A trailing stride-1 Conv2D smooths the last deconv output.

        Returns:
            keras Model mapping a ``self.img_shape`` input to an image with
            ``self.channels`` channels and ``self.output_activation``.
        """
        def conv2d(layer_input, filters, f_size=4, bn=True):
            # Downsampling block: stride-2 conv, optional BN, LeakyReLU.
            d = Conv2D(filters, kernel_size=f_size, strides=2,
                       padding='same')(layer_input)
            if bn:
                d = BatchNormalization(momentum=0.8)(d)
            d = LeakyReLU(alpha=0.2)(d)

            return d

        def deconv2d(layer_input,
                     skip_input,
                     filters,
                     f_size=4,
                     dropout_rate=0):
            # Upsampling block: stride-2 transposed conv followed by a
            # stride-1 conv (to reduce checkerboard artifacts), then BN,
            # optional dropout, and an optional skip-connection concat.
            u = Conv2DTranspose(filters,
                                kernel_size=f_size,
                                strides=(2, 2),
                                padding='same',
                                activation='linear')(layer_input)
            u = Conv2D(filters,
                       kernel_size=f_size,
                       strides=1,
                       padding='same',
                       activation='relu')(u)

            u = BatchNormalization(momentum=0.8)(u)
            if dropout_rate:
                u = Dropout(dropout_rate)(u)
            if self.skip_connection:
                u = Concatenate()([u, skip_input])

            return u

        # Image input
        d0 = Input(shape=self.img_shape)

        # Downsampling: 7 x stride of 2 --> x1/128 downsampling
        d1 = conv2d(d0, self.filters, bn=False)
        d2 = conv2d(d1, self.filters * 2)
        d3 = conv2d(d2, self.filters * 4)
        d4 = conv2d(d3, self.filters * 8)
        d5 = conv2d(d4, self.filters * 8)
        d6 = conv2d(d5, self.filters * 8)
        d7 = conv2d(d6, self.filters * 8)

        # Upsampling: 6 x stride of 2 --> x64 upsampling
        u1 = deconv2d(d7, d6, self.filters * 8)
        u2 = deconv2d(u1, d5, self.filters * 8)
        u3 = deconv2d(u2, d4, self.filters * 8)
        u4 = deconv2d(u3, d3, self.filters * 4)
        u5 = deconv2d(u4, d2, self.filters * 2)
        u6 = deconv2d(u5, d1, self.filters)
        # Final x2 upsampling restores the original resolution (x128 total).
        u7 = Conv2DTranspose(self.channels,
                             kernel_size=4,
                             strides=(2, 2),
                             padding='same',
                             activation='linear')(u6)

        # added conv layers after the deconvs to avoid the pixelated outputs
        output_img = Conv2D(self.channels,
                            kernel_size=4,
                            strides=1,
                            padding='same',
                            activation=self.output_activation)(u7)

        return Model(d0, output_img)
Example #15
0
def Unet(input_img, n_filters=32, dropout=0.4, batch_norm=True):
    """U-Net built from conv_block stages with strided-conv downsampling.

    Args:
        input_img: keras input tensor, e.g. ``Input((H, W, C))``.
        n_filters: base filter count; conv_block stages use multiples of it.
        dropout: currently unused (all Dropout layers are commented out).
        batch_norm: forwarded to conv_block.

    Returns:
        keras Model producing a 1-channel sigmoid mask.

    NOTE(review): every downsampling conv (p1-p4) uses ``n_filters`` while the
    preceding conv_block widens to n_filters*2/4/8 — this narrows the channels
    before each pooling step and looks like a copy-paste; confirm it is
    intentional before relying on the architecture.
    """

    c1 = conv_block(input_img, n_filters, 3, batch_norm)
    # Stride-2 conv instead of max-pooling: learnable downsampling.
    p1 = Conv2D(n_filters,
                kernel_size=(3, 3),
                strides=2,
                padding='same',
                kernel_initializer='he_normal')(c1)
    #p1 = Dropout(dropout)(p1)

    c2 = conv_block(p1, n_filters * 2, 3, batch_norm)
    p2 = Conv2D(n_filters,
                kernel_size=(3, 3),
                strides=2,
                padding='same',
                kernel_initializer='he_normal')(c2)
    #p2 = Dropout(dropout)(p2)

    c3 = conv_block(p2, n_filters * 4, 3, batch_norm)
    p3 = Conv2D(n_filters,
                kernel_size=(3, 3),
                strides=2,
                padding='same',
                kernel_initializer='he_normal')(c3)
    #p3 = Dropout(dropout)(p3)

    c4 = conv_block(p3, n_filters * 8, 3, batch_norm)
    p4 = Conv2D(n_filters,
                kernel_size=(3, 3),
                strides=2,
                padding='same',
                kernel_initializer='he_normal')(c4)
    #p4 = Dropout(dropout)(p4)

    # Bottleneck.
    c5 = conv_block(p4, n_filters * 16, 3, batch_norm)

    # Expanding path: transposed conv x2, concat skip, conv_block.
    u6 = Conv2DTranspose(n_filters * 8, (3, 3), strides=(2, 2),
                         padding='same')(c5)
    u6 = concatenate([u6, c4])
    c6 = conv_block(u6, n_filters * 8, 3, batch_norm)
    #c6 = Dropout(dropout)(c6)
    u7 = Conv2DTranspose(n_filters * 4, (3, 3), strides=(2, 2),
                         padding='same')(c6)

    u7 = concatenate([u7, c3])
    c7 = conv_block(u7, n_filters * 4, 3, batch_norm)
    #c7 = Dropout(dropout)(c7)
    u8 = Conv2DTranspose(n_filters * 2, (3, 3), strides=(2, 2),
                         padding='same')(c7)
    u8 = concatenate([u8, c2])

    c8 = conv_block(u8, n_filters * 2, 3, batch_norm)
    #c8 = Dropout(dropout)(c8)
    u9 = Conv2DTranspose(n_filters, (3, 3), strides=(2, 2), padding='same')(c8)

    u9 = concatenate([u9, c1])

    c9 = conv_block(u9, n_filters, 3, batch_norm)
    outputs = Conv2D(1, (1, 1), activation='sigmoid')(c9)

    model = Model(inputs=input_img, outputs=outputs)

    return model
Example #16
0
def get_unet_parallel(img_width=512,
                      img_height=512,
                      img_channels=1,
                      activation='elu',
                      kernel_initializer='he_normal',
                      optimizer='adam',
                      loss='binary_crossentropy'):
    """Build a classic 4-level U-Net and wrap it with multi_gpu_model.

    Same topology as get_unet(); the compiled model returned is the
    data-parallel wrapper, not the underlying single-device model.
    """
    from keras.utils import multi_gpu_model
    import tensorflow as tf
    import keras.backend.tensorflow_backend as tfback

    def _get_available_gpus():
        """Get a list of available gpu devices (formatted as strings).

        # Returns
            A list of available GPU devices.
        """

        if tfback._LOCAL_DEVICES is None:
            devices = tf.config.list_logical_devices()
            tfback._LOCAL_DEVICES = [x.name for x in devices]
        return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]

    # Monkey-patch keras' TF1-era device discovery to work on TF2.
    tfback._get_available_gpus = _get_available_gpus

    def _double_conv(tensor, filters, drop):
        # Two 3x3 convs with dropout sandwiched between them.
        tensor = Conv2D(filters, (3, 3),
                        activation=activation,
                        kernel_initializer=kernel_initializer,
                        padding='same')(tensor)
        tensor = Dropout(drop)(tensor)
        tensor = Conv2D(filters, (3, 3),
                        activation=activation,
                        kernel_initializer=kernel_initializer,
                        padding='same')(tensor)
        return tensor

    inputs = Input((img_height, img_width, img_channels))
    rescaled = Lambda(lambda x: x / 255)(inputs)

    # Contracting path: filters double at every level.
    c1 = _double_conv(rescaled, 16, 0.1)
    p1 = MaxPooling2D((2, 2))(c1)
    c2 = _double_conv(p1, 32, 0.1)
    p2 = MaxPooling2D((2, 2))(c2)
    c3 = _double_conv(p2, 64, 0.2)
    p3 = MaxPooling2D((2, 2))(c3)
    c4 = _double_conv(p3, 128, 0.2)
    p4 = MaxPooling2D(pool_size=(2, 2))(c4)

    # Bottleneck.
    c5 = _double_conv(p4, 256, 0.3)

    # Expanding path: x2 transposed conv, concat skip, double conv.
    u6 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(c5)
    c6 = _double_conv(concatenate([u6, c4]), 128, 0.2)
    u7 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(c6)
    c7 = _double_conv(concatenate([u7, c3]), 64, 0.2)
    u8 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(c7)
    c8 = _double_conv(concatenate([u8, c2]), 32, 0.1)
    u9 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same')(c8)
    c9 = _double_conv(concatenate([u9, c1], axis=3), 16, 0.1)

    outputs = Conv2D(1, (1, 1), activation='sigmoid')(c9)

    model = Model(inputs=[inputs], outputs=[outputs])
    parallel_model = multi_gpu_model(model)

    parallel_model.compile(optimizer=optimizer, loss=loss, metrics=[dice_coef])
    return parallel_model
Example #17
0
if CONTINUE_TRAIN:
	print "Loading Model..."
	model = load_model('Encoder.h5')
else:
	print "Building Model..."
	model = Sequential()

	model.add(Embedding(num_samples, PARAM_SIZE, input_length=1))
	model.add(Flatten(name='pre_encoder'))
	print model.output_shape
	assert(model.output_shape == (None, PARAM_SIZE))
	
	model.add(Reshape((PARAM_SIZE, 1, 1), name='encoder'))
	print model.output_shape
	
	model.add(Conv2DTranspose(256, (4, 1)))           #(4, 1)
	model.add(Activation("relu"))
	print model.output_shape

	model.add(Conv2DTranspose(256, 4))                #(7, 4)
	model.add(Activation("relu"))
	print model.output_shape
	
	model.add(Conv2DTranspose(256, 4))                #(10, 7)
	model.add(Activation("relu"))
	print model.output_shape
	
	model.add(Conv2DTranspose(256, 4, strides=2))     #(22, 16)
	model.add(Activation("relu"))
	print model.output_shape
	
Example #18
0
def get_unet(img_width=512,
             img_height=512,
             img_channels=1,
             activation='elu',
             kernel_initializer='he_normal',
             optimizer='adam',
             loss='binary_crossentropy'):
    """Classic 4-level U-Net with a /255 rescale on the input.

    Returns a compiled keras Model (dice_coef metric) mapping
    (img_height, img_width, img_channels) images to a 1-channel sigmoid mask.
    """
    def _double_conv(tensor, filters, drop):
        # Two 3x3 convs with dropout sandwiched between them.
        tensor = Conv2D(filters, (3, 3),
                        activation=activation,
                        kernel_initializer=kernel_initializer,
                        padding='same')(tensor)
        tensor = Dropout(drop)(tensor)
        tensor = Conv2D(filters, (3, 3),
                        activation=activation,
                        kernel_initializer=kernel_initializer,
                        padding='same')(tensor)
        return tensor

    inputs = Input((img_height, img_width, img_channels))
    rescaled = Lambda(lambda x: x / 255)(inputs)

    # Contracting path: filters double at every level.
    c1 = _double_conv(rescaled, 16, 0.1)
    p1 = MaxPooling2D((2, 2))(c1)
    c2 = _double_conv(p1, 32, 0.1)
    p2 = MaxPooling2D((2, 2))(c2)
    c3 = _double_conv(p2, 64, 0.2)
    p3 = MaxPooling2D((2, 2))(c3)
    c4 = _double_conv(p3, 128, 0.2)
    p4 = MaxPooling2D(pool_size=(2, 2))(c4)

    # Bottleneck.
    c5 = _double_conv(p4, 256, 0.3)

    # Expanding path: x2 transposed conv, concat skip, double conv.
    u6 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(c5)
    c6 = _double_conv(concatenate([u6, c4]), 128, 0.2)
    u7 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(c6)
    c7 = _double_conv(concatenate([u7, c3]), 64, 0.2)
    u8 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(c7)
    c8 = _double_conv(concatenate([u8, c2]), 32, 0.1)
    u9 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same')(c8)
    c9 = _double_conv(concatenate([u9, c1], axis=3), 16, 0.1)

    outputs = Conv2D(1, (1, 1), activation='sigmoid')(c9)

    model = Model(inputs=[inputs], outputs=[outputs])
    model.compile(optimizer=optimizer, loss=loss, metrics=[dice_coef])
    return model
Example #19
0
def unet_model(im_height, im_width, im_chan):
    """U-Net whose encoder is built from resnet_block stages.

    Five resolution levels (4..64 filters) downsample via stride-2 residual
    blocks; a 128-filter conv pair forms the bottleneck; the decoder mirrors
    the encoder with Conv2DTranspose upsampling plus skip connections.

    Returns:
        keras Model producing a 1-channel sigmoid mask.
    """
    input_img = Input((im_height, im_width, im_chan), name='img')

    # Encoder: each level is a stride-2 block (except the first) followed by
    # a stride-1 refinement block.
    res1 = resnet_block(resnet_block(input_img, filters=4, strides=1),
                        filters=4, strides=1)
    res2 = resnet_block(resnet_block(res1, filters=8, strides=2),
                        filters=8, strides=1)
    res3 = resnet_block(resnet_block(res2, filters=16, strides=2),
                        filters=16, strides=1)
    res4 = resnet_block(resnet_block(res3, filters=32, strides=2),
                        filters=32, strides=1)
    res5 = resnet_block(resnet_block(res4, filters=64, strides=2),
                        filters=64, strides=1)

    # Bottleneck: one more stride-2 step, then a stride-1 conv.
    mid = convlayer(res5,
                    filters=128,
                    kernel_size=3,
                    strides=2,
                    activation='relu')
    mid = convlayer(mid,
                    filters=128,
                    kernel_size=3,
                    strides=1,
                    activation='relu')

    # Decoder: upsample x2, concat the matching encoder output, refine twice.
    x = mid
    for filters, skip in ((64, res5), (32, res4), (16, res3),
                          (8, res2), (4, res1)):
        x = Conv2DTranspose(filters, (2, 2), strides=(2, 2),
                            padding='same')(x)
        x = concatenate([x, skip])
        x = convlayer(x, filters=filters, kernel_size=3, strides=1,
                      activation='relu')
        x = convlayer(x, filters=filters, kernel_size=3, strides=1,
                      activation='relu')

    outputs = convlayer(x,
                        filters=1,
                        kernel_size=1,
                        strides=1,
                        activation='sigmoid')

    return Model(inputs=[input_img], outputs=[outputs])
def TransitionUp(skip_connection, block_to_upsample, n_filters_keep):
    """Upsample ``block_to_upsample`` by a factor of 2 with a strided
    transposed conv and concatenate the result with ``skip_connection``
    along the channel axis."""
    upsampled = Conv2DTranspose(n_filters_keep, kernel_size=3, strides=2,
                                padding='same',
                                kernel_initializer='he_uniform')(block_to_upsample)
    return concatenate([upsampled, skip_connection], axis=-1)
def build_model(input_layer, start_neurons, DropoutRatio=0.5):
    """Residual U-Net for 101x101 inputs (TGS-salt style).

    Encoder path halves the resolution four times (101 -> 50 -> 25 -> 12 -> 6);
    the decoder alternates 'same' and 'valid' transposed convs so the odd
    sizes (25, 101) are recovered exactly.  Returns the sigmoid output layer.
    """
    def _res_stage(x, filters):
        # Plain conv followed by two residual blocks (second one activated).
        x = Conv2D(filters, (3, 3), activation=None, padding="same")(x)
        x = residual_block(x, filters)
        return residual_block(x, filters, True)

    # Encoder: 101 -> 50 -> 25 -> 12 -> 6.
    conv1 = _res_stage(input_layer, start_neurons * 1)
    pool1 = Dropout(DropoutRatio / 2)(MaxPooling2D((2, 2))(conv1))
    conv2 = _res_stage(pool1, start_neurons * 2)
    pool2 = Dropout(DropoutRatio)(MaxPooling2D((2, 2))(conv2))
    conv3 = _res_stage(pool2, start_neurons * 4)
    pool3 = Dropout(DropoutRatio)(MaxPooling2D((2, 2))(conv3))
    conv4 = _res_stage(pool3, start_neurons * 8)
    pool4 = Dropout(DropoutRatio)(MaxPooling2D((2, 2))(conv4))

    # Middle.
    convm = _res_stage(pool4, start_neurons * 16)

    def _up_stage(x, skip, filters, padding):
        # Upsample x2, concat the encoder skip, dropout, residual stage.
        # 'valid' padding on the 6->12... no: on the 12->25 and 50->101 steps
        # it produces the odd output sizes that 'same' cannot.
        x = Conv2DTranspose(filters, (3, 3), strides=(2, 2),
                            padding=padding)(x)
        x = concatenate([x, skip])
        x = Dropout(DropoutRatio)(x)
        return _res_stage(x, filters)

    # Decoder: 6 -> 12 -> 25 -> 50 -> 101.
    uconv4 = _up_stage(convm, conv4, start_neurons * 8, "same")
    uconv3 = _up_stage(uconv4, conv3, start_neurons * 4, "valid")
    uconv2 = _up_stage(uconv3, conv2, start_neurons * 2, "same")
    uconv1 = _up_stage(uconv2, conv1, start_neurons * 1, "valid")

    # Single-logit head followed by an explicit sigmoid activation.
    output_layer_noActi = Conv2D(1, (1, 1), padding="same",
                                 activation=None)(uconv1)
    output_layer = Activation('sigmoid')(output_layer_noActi)

    return output_layer
Example #22
0
def get_generator(image_shape=None,resize_factor=1.0):
    """Build a pix2pix-style U-Net generator (256x256 in -> 256x256 out).

    Args:
        image_shape: input image shape tuple, e.g. (256, 256, 3).
        resize_factor: multiplier applied to every layer's filter count to
            scale capacity; 1.0 reproduces the standard pix2pix widths.

    Returns:
        keras Model mapping the input image to a 3-channel sigmoid image.
    """

    # define an encoder block
    def encoder_block(layer_in, n_filters, batchnorm=True):
        # weight initialization
        init = RandomNormal(stddev=0.02)
        # add downsampling layer
        g = Conv2D(n_filters, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(layer_in)
        # conditionally add batch normalization
        if batchnorm:
            g = BatchNormalization()(g, training=True)
        # leaky relu activation
        g = LeakyReLU(alpha=0.2)(g)
        return g

    # define a decoder block
    def decoder_block(layer_in, skip_in, n_filters, dropout=True):
        # weight initialization
        init = RandomNormal(stddev=0.02)
        # add upsampling layer
        g = Conv2DTranspose(n_filters, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(layer_in)
        # add batch normalization
        g = BatchNormalization()(g, training=True)
        # conditionally add dropout
        if dropout:
            g = Dropout(0.5)(g, training=True)
        # merge with skip connection
        g = Concatenate()([g, skip_in])
        # relu activation
        g = Activation('relu')(g)
        return g

    # weight initialization
    init = RandomNormal(stddev=0.02)
    # image input
    in_image = Input(shape=image_shape)#256x256
    # encoder model
    e1 = encoder_block(in_image, int(64*resize_factor), batchnorm=False)#128x128
    e2 = encoder_block(e1, int(128*resize_factor))#64x64
    e3 = encoder_block(e2, int(256*resize_factor))#32x32
    e4 = encoder_block(e3, int(512*resize_factor))#16x16
    e5 = encoder_block(e4, int(512*resize_factor))#8x8
    e6 = encoder_block(e5, int(512*resize_factor))#4x4
    e7 = encoder_block(e6, int(512*resize_factor))#2x2
    # bottleneck, no batch norm and relu
    # BUGFIX: was int(512/resize_factor) — dividing inverted the scaling for
    # the bottleneck only (e.g. resize_factor=0.5 gave 1024 filters while the
    # rest of the network shrank to half width).  Multiply like every other
    # layer; behavior is unchanged for the default resize_factor=1.0.
    b = Conv2D(int(512*resize_factor), (4,4), strides=(2,2), padding='same', kernel_initializer=init)(e7)#1x1
    b = Activation('relu')(b)
    # decoder model
    d1 = decoder_block(b, e7, int(512*resize_factor))
    d2 = decoder_block(d1, e6, int(512*resize_factor))
    d3 = decoder_block(d2, e5, int(512*resize_factor))
    d4 = decoder_block(d3, e4, int(512*resize_factor), dropout=False)
    d5 = decoder_block(d4, e3, int(256*resize_factor), dropout=False)
    d6 = decoder_block(d5, e2, int(128*resize_factor), dropout=False)
    d7 = decoder_block(d6, e1, int(64*resize_factor), dropout=False)
    # output
    g = Conv2DTranspose(3, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d7)
#    out_image = Activation('tanh')(g)
    out_image = Activation('sigmoid')(g)
    # define model
    model = Model(in_image, out_image)
    return model
Example #23
0
e4 = BatchNormalization(axis=bnorm_axis)(e4)
e4 = Activation('relu')(e4)
e4 = Conv2D(filters=nfilters[4], kernel_size=(3, 3), padding='same')(e4)
e4 = BatchNormalization(axis=bnorm_axis)(e4)
e4 = Activation('relu')(e4)
#e4 = MaxPooling2D((2, 2))(e4)

####################################
# decoder (expansive path)
####################################

#decoder block 3
d3 = Dropout(drop_rate)(e4, training=drop_train)
d3 = UpSampling2D((2, 2), )(d3)
d3 = concatenate([e3, d3], axis=-1)  #skip connection
d3 = Conv2DTranspose(nfilters[3], (3, 3), padding='same')(d3)
d3 = BatchNormalization(axis=bnorm_axis)(d3)
d3 = Activation('relu')(d3)
d3 = Conv2DTranspose(nfilters[3], (3, 3), padding='same')(d3)
d3 = BatchNormalization(axis=bnorm_axis)(d3)
d3 = Activation('relu')(d3)

#decoder block 2
d2 = Dropout(drop_rate)(d3, training=drop_train)
d2 = UpSampling2D((2, 2), )(d2)
d2 = concatenate([e2, d2], axis=-1)  #skip connection
d2 = Conv2DTranspose(nfilters[2], (3, 3), padding='same')(d2)
d2 = BatchNormalization(axis=bnorm_axis)(d2)
d2 = Activation('relu')(d2)
d2 = Conv2DTranspose(nfilters[2], (3, 3), padding='same')(d2)
d2 = BatchNormalization(axis=bnorm_axis)(d2)
Example #24
0
def U_Net():
    """Small fixed-size U-Net for 128x128 inputs with 2 channels.

    Channel 0 is the original image, channel 1 its cumsum(axis=0).  Four
    pooling levels (8..64 filters), a 128-filter bottleneck with dropout,
    and a mirrored decoder with skip connections.  Returns an uncompiled
    keras Model producing a 1-channel sigmoid mask.
    """
    im_width = 128
    im_height = 128
    border = 5
    im_chan = 2  # Number of channels: first is original and second cumsum(axis=0)
    n_features = 1  # Number of extra features, like depth
    #path_train = '../input/train/'
    #path_test = '../input/test/'

    input_img = Input((im_height, im_width, im_chan), name='img')
    #input_features = Input((n_features, ), name='feat')

    def _twin_conv(x, filters):
        # Two back-to-back 3x3 relu convs at the same resolution.
        x = Conv2D(filters, (3, 3), activation='relu', padding='same')(x)
        return Conv2D(filters, (3, 3), activation='relu', padding='same')(x)

    # Contracting path.
    c1 = _twin_conv(input_img, 8)
    p1 = MaxPooling2D((2, 2))(c1)
    c2 = _twin_conv(p1, 16)
    p2 = MaxPooling2D((2, 2))(c2)
    c3 = _twin_conv(p2, 32)
    p3 = MaxPooling2D((2, 2))(c3)
    c4 = _twin_conv(p3, 64)
    d4 = Dropout(0.1)(c4)
    p4 = MaxPooling2D((2, 2))(d4)

    # Bottleneck (extra feature fusion left commented out as in the source).
    #f_repeat = RepeatVector(8*8)(input_features)
    #f_conv = Reshape((8, 8, n_features))(f_repeat)
    #p4_feat = concatenate([p4, f_conv], -1)
    c5 = _twin_conv(p4, 128)
    d5 = Dropout(0.1)(c5)

    # Expanding path: upsample x2, concat skip, twin conv.
    u6 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(d5)
    c6 = _twin_conv(concatenate([u6, d4]), 64)
    u7 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(c6)
    c7 = _twin_conv(concatenate([u7, c3]), 32)
    u8 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same')(c7)
    c8 = _twin_conv(concatenate([u8, c2]), 16)
    u9 = Conv2DTranspose(8, (2, 2), strides=(2, 2), padding='same')(c8)
    c9 = _twin_conv(concatenate([u9, c1], axis=3), 8)

    outputs = Conv2D(1, (1, 1), activation='sigmoid')(c9)

    return Model(inputs=[input_img], outputs=[outputs])
Example #25
0
def build_model(input_shape):
    """Assemble and compile a U-Net style binary-segmentation model.

    Five encoder stages (the fifth keeps its spatial size — its pooling
    step is intentionally absent) feed a 128-filter bottleneck, then a
    mirrored decoder with skip connections produces a single-channel
    sigmoid mask.

    Parameters
    ----------
    input_shape : tuple
        Shape of one input image, e.g. ``(H, W, C)``.

    Returns
    -------
    Model
        Compiled with adam, ``bce_dice_loss`` loss and ``dice_coef``
        metric (both defined elsewhere in this file).
    """

    def conv_pair(tensor, filters):
        # Two 3x3 relu convolutions followed by batch normalisation.
        tensor = Conv2D(filters, (3, 3), activation='relu', padding='same')(tensor)
        tensor = Conv2D(filters, (3, 3), activation='relu', padding='same')(tensor)
        return BN()(tensor)

    def up_block(tensor, skip, filters, strides):
        # Transposed conv, concatenate the encoder skip, then a conv pair.
        tensor = Conv2DTranspose(filters, (2, 2), strides=strides, padding='same')(tensor)
        tensor = concatenate([tensor, skip])
        return conv_pair(tensor, filters)

    inputs = Input(input_shape)

    # Encoder path.
    enc1 = conv_pair(inputs, 8)
    enc2 = conv_pair(MaxPooling2D((2, 2))(enc1), 16)
    enc3 = conv_pair(MaxPooling2D((2, 2))(enc2), 32)
    enc4 = conv_pair(MaxPooling2D((2, 2))(enc3), 64)
    # Fifth stage: no pooling afterwards, so its resolution is kept.
    enc5 = conv_pair(MaxPooling2D(pool_size=(2, 2))(enc4), 64)

    # Bottleneck at the same resolution as enc5.
    bottleneck = conv_pair(enc5, 128)

    # Decoder path; the first step uses stride 1 because enc5 was never
    # pooled, the remaining steps each double the spatial size.
    dec = up_block(bottleneck, enc5, 64, (1, 1))
    dec = up_block(dec, enc4, 32, (2, 2))
    dec = up_block(dec, enc3, 32, (2, 2))
    dec = up_block(dec, enc2, 16, (2, 2))
    dec = up_block(dec, enc1, 8, (2, 2))

    # Per-pixel probability mask.
    outputs = Conv2D(1, (1, 1), activation='sigmoid')(dec)

    model = Model(inputs=[inputs], outputs=[outputs])
    model.compile(optimizer='adam', loss=bce_dice_loss, metrics=[dice_coef])

    return model
Example #26
0
# Merger: zero-pad the 32x32 center crop out to 64x64 so it can be added
# element-wise onto the surrounding context image.
center = layers.Input(shape=(32, 32, 3))
center_padded = ZeroPadding2D(padding=(16, 16))(center)
# NOTE(review): `context` is defined elsewhere in this file — presumably a
# 64x64x3 input tensor matching the padded center; confirm at its definition.
merged = layers.add([center_padded, context])
merger_model = models.Model([center, context], merged)

# Loading pre-trained encoder
enc = load_model('encoder.h5')
# Freeze the encoder so only the generator's weights are trainable.
make_trainable(enc, False)

# Generator: takes as input the output of the encoder (128x4x4)
gen = Sequential()

# Upsample 4x4x128 -> 8x8x512 with a strided transposed convolution.
gen.add(
    Conv2DTranspose(512,
                    5,
                    padding='same',
                    strides=(2, 2),
                    input_shape=(4, 4, 128)))
gen.add(BatchNormalization())
gen.add(Activation('relu'))
# Small Gaussian noise injection acts as a regulariser during GAN training.
gen.add(GaussianNoise(0.02))

# Refine features at the same 8x8 resolution (stride 1).
gen.add(Conv2D(256, 5, padding='same', strides=(1, 1)))
gen.add(Activation('relu'))

# Upsample 8x8 -> 16x16.
gen.add(Conv2DTranspose(256, 5, padding='same', strides=(2, 2)))
gen.add(BatchNormalization())
gen.add(Activation('relu'))
gen.add(GaussianNoise(0.02))

# Same-resolution refinement at 16x16.
gen.add(Conv2D(128, 5, padding='same', strides=(1, 1)))
Example #27
0
def build(img_shape,
          nclasses=6,
          l2_reg=0.,
          init='glorot_uniform',
          padding=100,
          dropout=True):
    """Build a valid-padding U-Net emitting a per-pixel softmax over classes.

    Each encoder block's two 3x3 'valid' convolutions (which shrink the
    map by 4) are exactly undone by the matching decoder block's two 3x3
    'valid' transposed convolutions, and each 2x2 pool is undone by a 2x
    upsampling — so the skip concatenations line up without cropping and
    the final feature map returns to ``img_shape`` spatial size.

    Args:
        img_shape: (height, width, channels) of the input images.
        nclasses: number of output classes (softmax channels).
        l2_reg: L2 weight-decay factor applied to the conv kernels.
        init: unused in this implementation; kept for interface
            compatibility with callers.
        padding: unused (the ZeroPadding2D call is commented out); kept
            for interface compatibility with callers.
        dropout: when True, applies 0.5 dropout after blocks 4 and 5.

    Returns:
        An uncompiled ``Model`` mapping images to per-pixel class
        probabilities of shape ``(height, width, nclasses)``.
    """

    # Regularization warning
    if l2_reg > 0.:
        print("Regularizing the weights: " + str(l2_reg))

    # Input
    inputs = Input(img_shape, name='input')
    #padded = ZeroPadding2D(padding=(padding, padding), name='padded')(inputs)

    # Block 1
    conv1_1 = Conv2D(64,
                     3,
                     padding='valid',
                     name='conv1_1',
                     kernel_regularizer=l2(l2_reg))(inputs)
    conv1_2 = Conv2D(64,
                     3,
                     padding='valid',
                     name='conv1_2',
                     kernel_regularizer=l2(l2_reg))(conv1_1)
    conv1_2 = BatchNormalization(axis=3)(conv1_2)
    pool1 = MaxPooling2D((2, 2), (2, 2), name='pool1')(conv1_2)

    # Block 2
    conv2_1 = Conv2D(128,
                     3,
                     padding='valid',
                     name='conv2_1',
                     kernel_regularizer=l2(l2_reg))(pool1)
    conv2_2 = Conv2D(128,
                     3,
                     padding='valid',
                     name='conv2_2',
                     kernel_regularizer=l2(l2_reg))(conv2_1)
    conv2_2 = BatchNormalization(axis=3)(conv2_2)
    pool2 = MaxPooling2D((2, 2), (2, 2), name='pool2')(conv2_2)

    # Block 3
    conv3_1 = Conv2D(256,
                     3,
                     padding='valid',
                     name='conv3_1',
                     kernel_regularizer=l2(l2_reg))(pool2)
    conv3_2 = Conv2D(256,
                     3,
                     padding='valid',
                     name='conv3_2',
                     kernel_regularizer=l2(l2_reg))(conv3_1)
    conv3_2 = BatchNormalization(axis=3)(conv3_2)
    pool3 = MaxPooling2D((2, 2), (2, 2), name='pool3')(conv3_2)

    # Block 4
    conv4_1 = Conv2D(512,
                     3,
                     padding='valid',
                     name='conv4_1',
                     kernel_regularizer=l2(l2_reg))(pool3)
    conv4_2 = Conv2D(512,
                     3,
                     padding='valid',
                     name='conv4_2',
                     kernel_regularizer=l2(l2_reg))(conv4_1)
    conv4_2 = BatchNormalization(axis=3)(conv4_2)
    if dropout:
        conv4_2 = Dropout(0.5, name='drop1')(conv4_2)
    pool4 = MaxPooling2D((2, 2), (2, 2), name='pool4')(conv4_2)

    # Block 5 (deepest; no pooling afterwards)
    conv5_1 = Conv2D(1024,
                     3,
                     padding='valid',
                     name='conv5_1',
                     kernel_regularizer=l2(l2_reg))(pool4)
    conv5_2 = Conv2D(1024,
                     3,
                     padding='valid',
                     name='conv5_2',
                     kernel_regularizer=l2(l2_reg))(conv5_1)
    if dropout:
        conv5_2 = Dropout(0.5, name='drop2')(conv5_2)
    # pool5 = MaxPooling2D((2, 2), (2, 2), name='pool4')(conv5_2)
    conv5_2 = BatchNormalization(axis=3)(conv5_2)
    # Upsampling 1: two valid transposed convs grow the map back by 4,
    # then 2x upsampling undoes pool4 so Concat_4 shapes match.
    upconv4_1 = Conv2DTranspose(512, 3, padding='valid',
                                name='upconv4_1')(conv5_2)
    upconv4_2 = Conv2DTranspose(512, 3, padding='valid',
                                name='upconv4_2')(upconv4_1)
    upconv4_3 = UpSampling2D((2, 2), name='upconv4_3')(upconv4_2)
    upconv4_3 = BatchNormalization(axis=3)(upconv4_3)
    #conv4_2_crop = CropLayer2D(upconv4, name='conv4_2_crop')(conv4_2)
    #upconv4_crop = CropLayer2D(upconv4, name='upconv4_crop')(upconv4)
    #Concat_4 = merge([conv4_2_crop, upconv4_crop], mode='concat', concat_axis=3, name='Concat_4')
    #Concat_4 = merge([conv4_2, upconv4_3], mode='concat', concat_axis=3, name='Concat_4')
    Concat_4 = Concatenate(name='Concat_4')([conv4_2, upconv4_3])
    upconv3_1 = Conv2DTranspose(256,
                                3,
                                padding='valid',
                                name='upconv3_1',
                                kernel_regularizer=l2(l2_reg))(Concat_4)
    upconv3_2 = Conv2DTranspose(256,
                                3,
                                padding='valid',
                                name='upconv3_2',
                                kernel_regularizer=l2(l2_reg))(upconv3_1)

    # Upsampling 2
    upconv3_3 = UpSampling2D((2, 2), name='upconv3_3')(upconv3_2)
    upconv3_3 = BatchNormalization(axis=3)(upconv3_3)
    #conv3_2_crop = CropLayer2D(upconv3, name='conv3_2_crop')(conv3_2)
    #Concat_3 = merge([conv3_2, upconv3_3], mode='concat', name='Concat_3')
    Concat_3 = Concatenate(name='Concat_3')([conv3_2, upconv3_3])
    upconv2_1 = Conv2DTranspose(128,
                                3,
                                padding='valid',
                                name='upconv2_1',
                                kernel_regularizer=l2(l2_reg))(Concat_3)
    upconv2_2 = Conv2DTranspose(128,
                                3,
                                padding='valid',
                                name='upconv2_2',
                                kernel_regularizer=l2(l2_reg))(upconv2_1)

    # Upsampling 3
    upconv2_3 = UpSampling2D((2, 2), name='upconv2_3')(upconv2_2)
    upconv2_3 = BatchNormalization(axis=3)(upconv2_3)
    #conv2_2_crop = CropLayer2D(upconv2, name='conv2_2_crop')(conv2_2)
    #Concat_2 = merge([conv2_2, upconv2_3], mode='concat', name='Concat_2')
    Concat_2 = Concatenate(name='Concat_2')([conv2_2, upconv2_3])
    upconv1_1 = Conv2DTranspose(64,
                                3,
                                padding='valid',
                                name='upconv1_1',
                                kernel_regularizer=l2(l2_reg))(Concat_2)
    upconv1_2 = Conv2DTranspose(64,
                                3,
                                padding='valid',
                                name='upconv1_2',
                                kernel_regularizer=l2(l2_reg))(upconv1_1)

    # Upsampling 4
    upconv1_3 = UpSampling2D((2, 2), name='upconv1_3')(upconv1_2)
    upconv1_3 = BatchNormalization(axis=3)(upconv1_3)
    #conv1_2_crop = CropLayer2D(upconv1, name='conv1_2_crop')(conv1_2)
    #Concat_1 = merge([conv1_2, upconv1_3], mode='concat', name='Concat_1')
    Concat_1 = Concatenate(name='Concat_1')([conv1_2, upconv1_3])
    upconv0_1 = Conv2DTranspose(32,
                                3,
                                padding='valid',
                                name='upconv0_1',
                                kernel_regularizer=l2(l2_reg))(Concat_1)
    upconv0_2 = Conv2DTranspose(32,
                                3,
                                padding='valid',
                                name='upconv0_2',
                                kernel_regularizer=l2(l2_reg))(upconv0_1)

    Concat_0 = Concatenate(name='Concat_0')([inputs, upconv0_2])
    #Concat_0 = merge([inputs, upconv0_2], mode='concat', name='Concat_0')

    # Bug fix: the final 1x1 conv previously consumed upconv0_2 directly,
    # leaving Concat_0 computed but unused; feed it the input/decoder
    # concatenation as its name indicates (shapes match: upconv0_2 is back
    # to img_shape spatial size by construction).
    final_layer = Conv2D(nclasses,
                         1,
                         padding='valid',
                         name='final_layer',
                         kernel_regularizer=l2(l2_reg))(Concat_0)
    final_layer = BatchNormalization(axis=3)(final_layer)
    # Crop
    #final_crop = CropLayer2D(inputs, name='final_crop')(conv10)
    # Softmax applied per pixel: flatten spatial dims, softmax over
    # classes, then restore the (H, W, nclasses) layout.
    softmax_unet_0 = Reshape(
        (img_shape[0] * img_shape[1], nclasses))(final_layer)
    softmax_unet_1 = Activation("softmax")(softmax_unet_0)
    softmax_unet = Reshape(
        (img_shape[0], img_shape[1], nclasses))(softmax_unet_1)

    # Complete model
    model = Model(inputs=inputs, outputs=softmax_unet)

    return model
Example #28
0
def upsample(block, skip_connection, filters, regularizer=regularizers.l2(0.0001)):
    """Double the spatial size of `block` with a 3x3 transposed conv and
    concatenate the matching encoder `skip_connection` onto the result."""
    upsampled = Conv2DTranspose(
        filters, (3, 3), strides=(2, 2), padding='same',
        kernel_regularizer=regularizer)(block)
    return concatenate([skip_connection, upsampled])
Example #29
0
def construct_model(im_height, im_width, im_chan, with_r=True):
    """Build and compile a CoordConv-fronted U-Net for binary segmentation.

    Inputs are scaled from [0, 255] to [0, 1], passed through a 1x1
    CoordConv layer (defined elsewhere in this file), then through a
    four-level encoder/decoder with skip connections ending in a
    single-channel sigmoid mask.

    Parameters
    ----------
    im_height, im_width, im_chan : int
        Input image dimensions.
    with_r : bool
        Forwarded to CoordConv (radial coordinate channel toggle).

    Returns
    -------
    Model
        Compiled with adam, binary cross-entropy and the ``mean_iou``
        metric defined elsewhere in this file.
    """

    def double_conv(tensor, filters):
        # Two 3x3 relu convolutions at constant resolution.
        tensor = Conv2D(filters, (3, 3), activation='relu', padding='same')(tensor)
        return Conv2D(filters, (3, 3), activation='relu', padding='same')(tensor)

    def up_block(tensor, skip, filters):
        # 2x transposed-conv upsampling, skip concat, then a conv pair.
        tensor = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same')(tensor)
        tensor = concatenate([tensor, skip])
        return double_conv(tensor, filters)

    inputs = Input((im_height, im_width, im_chan))
    s = Lambda(lambda x: x / 255)(inputs)

    print("Inputs shape:", inputs)

    # 1x1 CoordConv injects coordinate channels before the encoder.
    cc1 = CoordConv(im_height,
                    im_width,
                    with_r,
                    filters=8,
                    kernel_size=(1, 1),
                    activation='relu',
                    padding='same')(s)

    # Encoder path.
    enc1 = double_conv(cc1, 8)
    enc2 = double_conv(MaxPooling2D((2, 2))(enc1), 16)
    enc3 = double_conv(MaxPooling2D((2, 2))(enc2), 32)
    enc4 = double_conv(MaxPooling2D((2, 2))(enc3), 64)

    # Bottleneck.
    bottleneck = double_conv(MaxPooling2D(pool_size=(2, 2))(enc4), 128)

    # Decoder path with skip connections.
    dec = up_block(bottleneck, enc4, 64)
    dec = up_block(dec, enc3, 32)
    dec = up_block(dec, enc2, 16)
    dec = up_block(dec, enc1, 8)

    outputs = Conv2D(1, (1, 1), activation='sigmoid')(dec)

    model = Model(inputs=[inputs], outputs=[outputs])
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=[mean_iou])
    #model.summary()

    return model
Example #30
0
# Wrap the pre-built encoder (defined elsewhere in this file) as a
# standalone Model so it can be inspected/summarised on its own.
z = enc_model_1(img)
encoder1 = Model(img, z)

enc_model_1.summary()

encoder1.summary()

# Generator
batch_size = 100

# Maps a latent vector to a single-channel image: dense projection to
# 7x7x128, then two stride-2 transposed convs (7x7 -> 14x14 -> 28x28).
modelG = Sequential()
modelG.add(Dense(128 * 7 * 7, input_dim=latent_dim))
modelG.add(BatchNormalization(momentum=0.8))
modelG.add(LeakyReLU(alpha=0.2))
modelG.add(Reshape((7, 7, 128)))
# NOTE(review): kernel_size=(3,2) is asymmetric — possibly a typo for
# (3,3); confirm before reuse. Same for the two layers below.
modelG.add(Conv2DTranspose(128, kernel_size=(3,2), strides=2, padding="same"))
modelG.add(BatchNormalization(momentum=0.8))
modelG.add(LeakyReLU(alpha=0.2))
modelG.add(Conv2DTranspose(64, kernel_size=(3,2), strides=2, padding="same"))
modelG.add(BatchNormalization(momentum=0.8))
modelG.add(LeakyReLU(alpha=0.2))
# tanh output keeps pixel values in [-1, 1], the usual GAN convention.
modelG.add(Conv2DTranspose(1, kernel_size=(3,2), strides=1, padding="same", activation='tanh'))

modelG.summary()

# Stand-alone generator model: latent vector in, generated image out.
z = Input(shape=(latent_dim,))
gen_img = modelG(z)
generator = Model(z, gen_img)

generator.summary()