def VGG16_FCN32s_model(input_height, input_width, n_classes):
	# Build conv and pooling layers according to VGG16
	img_input = Input(shape=(input_height, input_width, 3))
	x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)
	x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
	# x = BatchNormalization()(x)
	x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

	x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
	x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
	# x = BatchNormalization()(x)
	x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

	x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
	x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
	x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
	# x = BatchNormalization()(x)
	x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
	fcn3 = x

	x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
	x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
	x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
	# x = BatchNormalization()(x)
	x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
	fcn4 = x

	x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
	x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
	x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
	# x = BatchNormalization()(x)
	x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)

	x = Conv2D(512, (7, 7), activation='relu', padding='same', name='block6_conv1')(x)
	x = Dropout(0.5)(x)

	x = Conv2D(512, (1, 1), activation='relu', padding='same', name='block7_conv1')(x)
	x = Dropout(0.5)(x)

	# Make score and do upsampling 2X (16,16) => (32,32)
	x = Conv2D(n_classes, (1, 1), kernel_initializer='he_normal', name='score')(x)
	x = Conv2DTranspose(n_classes, kernel_size=(4, 4), strides=(2, 2), use_bias=False, name='upscore_1')(x)
	x = Cropping2D(cropping=((1, 1),(1, 1)))(x)
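	# With 'valid' padding, Conv2DTranspose yields (in - 1) * 2 + 4 = 2 * in + 2,
	# so cropping 1 px per side leaves exactly 2 * in; the same arithmetic repeats below.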

	# Score the block4 pooling output and add it to the upsampled score
	fcn4 = Conv2D(n_classes, (1, 1), kernel_initializer='he_normal', name='fcn4_score')(fcn4)
	x = Add()([fcn4, x])

	# Do upsampling 2X (32,32)=>(64,64)
	x = Conv2DTranspose(n_classes, kernel_size=(4, 4), strides=(2, 2), use_bias=False, name='upscore_2')(x)
	x = Cropping2D(cropping=((1, 1),(1, 1)))(x)

	# Make score with the output of MaxPooling2D_3 and add with the output score (64,64)
	fcn3 = Conv2D(n_classes, (1, 1), kernel_initializer='he_normal', name='fcn3_score')(fcn3)
	x = Add()([fcn3, x])

	# Do upsampling 8X (64,64)=>(512,512)
	x = Conv2DTranspose(n_classes, kernel_size=(16, 16), strides=(8, 8), use_bias=False, name='upscore_3')(x)
	x = Cropping2D(cropping=((4, 4),(4, 4)))(x)

	x = Activation('softmax')(x)
	
	model = Model(img_input, x)
	
	# model_shape = model.output_shape
	# print(model_shape)

	return model
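# Usage sketch (not from the original source; the input size and class count
# are assumptions for illustration, and the Keras imports used throughout are
# assumed in scope). Despite the FCN32s name, the two skip connections plus
# the final 8x transpose make this an FCN-8s-style head.
fcn8s = VGG16_FCN32s_model(input_height=512, input_width=512, n_classes=21)
fcn8s.compile(optimizer='adam', loss='categorical_crossentropy')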
adam_beta_1 = 0.5

init = initializers.RandomNormal(stddev=0.02)

# Generator network
generator = Sequential()

# FC: 2x2x512
generator.add(
    Dense(2 * 2 * 512, input_shape=(latent_dim, ), kernel_initializer=init))
generator.add(Reshape((2, 2, 512)))
generator.add(BatchNormalization())
generator.add(LeakyReLU(0.2))

# Conv 1: 4x4x256
generator.add(Conv2DTranspose(256, kernel_size=5, strides=2, padding='same'))
generator.add(BatchNormalization())
generator.add(LeakyReLU(0.2))

# Conv 2: 8x8x128
generator.add(Conv2DTranspose(128, kernel_size=5, strides=2, padding='same'))
generator.add(BatchNormalization())
generator.add(LeakyReLU(0.2))

# Conv 3: 16x16x64
generator.add(Conv2DTranspose(64, kernel_size=5, strides=2, padding='same'))
generator.add(BatchNormalization())
generator.add(LeakyReLU(0.2))

# Conv 4: 32x32x3
generator.add(Conv2DTranspose(3, kernel_size=5, strides=2, padding='same',
                              activation='tanh'))  # 3-channel output; 'tanh' assumed, the original excerpt was cut off here
Example #3
x = BatchNormalization(name='bn7')(x)
x = Dropout(0.5)(x)
x = Activation('relu')(x)
x = Conv2D(256, (3, 3), padding='same', name='conv8')(x)
x = BatchNormalization(name='bn8')(x)
x = Activation('relu')(x)
x = MaxPooling2D()(x)
x = Conv2D(512, (3, 3), padding='same', name='conv9')(x)
x = BatchNormalization(name='bn9')(x)
x = Activation('relu')(x)
x = Dense(1024, activation='relu', name='fc1')(x)
x = Dense(1024, activation='relu', name='fc2')(x)

# Deconvolution Layers (BatchNorm after non-linear activation)

x = Conv2DTranspose(256, (3, 3), padding='same', name='deconv1')(x)
x = BatchNormalization(name='bn11')(x)
x = Activation('relu')(x)
x = UpSampling2D()(x)
x = Conv2DTranspose(256, (3, 3), padding='same', name='deconv2')(x)
x = BatchNormalization(name='bn12')(x)
x = Activation('relu')(x)
x = Conv2DTranspose(128, (3, 3), padding='same', name='deconv3')(x)
x = BatchNormalization(name='bn13')(x)
x = Activation('relu')(x)
x = UpSampling2D()(x)
x = Conv2DTranspose(128, (4, 4), padding='same', name='deconv4')(x)
x = BatchNormalization(name='bn14')(x)
x = Activation('relu')(x)
x = Conv2DTranspose(128, (3, 3), padding='same', name='deconv5')(x)
x = BatchNormalization(name='bn15')(x)
Example #4
def get_unet(img_rows=IMG_HEIGHT, img_cols=IMG_WIDTH):
    """Build a U-Net with a single-channel input and a sigmoid mask output."""
    inputs = Input((img_rows, img_cols, 1))
    conv1 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   name='conv1_1')(inputs)
    conv1 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   name='conv1_2')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2), name='pool1')(conv1)

    conv2 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   name='conv2_1')(pool1)
    conv2 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   name='conv2_2')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2), name='pool2')(conv2)

    conv3 = Conv2D(128, (3, 3),
                   activation='relu',
                   padding='same',
                   name='conv3_1')(pool2)
    conv3 = Conv2D(128, (3, 3),
                   activation='relu',
                   padding='same',
                   name='conv3_2')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2), name='pool3')(conv3)

    conv4 = Conv2D(256, (3, 3),
                   activation='relu',
                   padding='same',
                   name='conv4_1')(pool3)
    conv4 = Conv2D(256, (3, 3),
                   activation='relu',
                   padding='same',
                   name='conv4_2')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2), name='pool4')(conv4)

    conv5 = Conv2D(512, (3, 3),
                   activation='relu',
                   padding='same',
                   name='conv5_1')(pool4)
    conv5 = Conv2D(512, (3, 3),
                   activation='relu',
                   padding='same',
                   name='conv5_2')(conv5)

    up6 = concatenate([
        Conv2DTranspose(256, (3, 3), strides=(2, 2), padding='same',
                        name='convT_5')(conv5), conv4
    ],
                      axis=-1,
                      name='up_convT5_conv4')
    conv6 = Conv2D(256, (3, 3),
                   activation='relu',
                   padding='same',
                   name='conv6_1')(up6)
    conv6 = Conv2D(256, (3, 3),
                   activation='relu',
                   padding='same',
                   name='conv6_2')(conv6)

    up7 = concatenate([
        Conv2DTranspose(
            128, (2, 2), strides=(2, 2), padding='same',
            name='convT_6')(conv6), conv3
    ],
                      axis=-1,
                      name='up_convT6_conv3')
    conv7 = Conv2D(128, (3, 3),
                   activation='relu',
                   padding='same',
                   name='conv7_1')(up7)
    conv7 = Conv2D(128, (3, 3),
                   activation='relu',
                   padding='same',
                   name='conv7_2')(conv7)

    up8 = concatenate([
        Conv2DTranspose(
            64, (2, 2), strides=(2, 2), padding='same', name='convT_7')(conv7),
        conv2
    ],
                      axis=-1,
                      name='up_convT7_conv2')
    conv8 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   name='conv8_1')(up8)
    conv8 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   name='conv8_2')(conv8)

    up9 = concatenate([
        Conv2DTranspose(
            32, (2, 2), strides=(2, 2), padding='same', name='convT_8')(conv8),
        conv1
    ],
                      axis=-1,
                      name='up_convT8_conv1')
    conv9 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   name='conv9_1')(up9)
    conv9 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   name='conv9_2')(conv9)

    conv10 = Conv2D(1, (1, 1), activation='sigmoid',
                    name='conv10_sigmoid')(conv9)

    model = Model(inputs=[inputs], outputs=[conv10])

    model.compile(optimizer=RMSprop(lr=2e-4),
                  loss=dice_coef_loss,
                  metrics=[dice_coef])
    # alternatives tried: Adam(lr=1e-5), RMSprop(lr=2e-4)
    return model
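# get_unet above references dice_coef and dice_coef_loss without defining them.
# A minimal sketch, assuming the standard soft-Dice formulation:
from keras import backend as K

def dice_coef(y_true, y_pred, smooth=1.0):
    # Soft Dice over the flattened masks; 'smooth' guards against empty masks.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred):
    return 1.0 - dice_coef(y_true, y_pred)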
def build_model(input_layer, start_neurons):
    # 128 -> 64
    conv1 = Conv2D(start_neurons * 1, (3, 3),
                   activation="relu",
                   padding="same")(input_layer)
    conv1 = Conv2D(start_neurons * 1, (3, 3),
                   activation="relu",
                   padding="same")(conv1)
    pool1 = MaxPooling2D((2, 2))(conv1)
    pool1 = Dropout(0.25)(pool1)

    # 64 -> 32
    conv2 = Conv2D(start_neurons * 2, (3, 3),
                   activation="relu",
                   padding="same")(pool1)
    conv2 = Conv2D(start_neurons * 2, (3, 3),
                   activation="relu",
                   padding="same")(conv2)
    pool2 = MaxPooling2D((2, 2))(conv2)
    pool2 = Dropout(0.5)(pool2)

    # 32 -> 16
    conv3 = Conv2D(start_neurons * 4, (3, 3),
                   activation="relu",
                   padding="same")(pool2)
    conv3 = Conv2D(start_neurons * 4, (3, 3),
                   activation="relu",
                   padding="same")(conv3)
    pool3 = MaxPooling2D((2, 2))(conv3)
    pool3 = Dropout(0.5)(pool3)

    # 16 -> 8
    conv4 = Conv2D(start_neurons * 8, (3, 3),
                   activation="relu",
                   padding="same")(pool3)
    conv4 = Conv2D(start_neurons * 8, (3, 3),
                   activation="relu",
                   padding="same")(conv4)
    pool4 = MaxPooling2D((2, 2))(conv4)
    pool4 = Dropout(0.5)(pool4)

    # Middle
    convm = Conv2D(start_neurons * 16, (3, 3),
                   activation="relu",
                   padding="same")(pool4)
    convm = Conv2D(start_neurons * 16, (3, 3),
                   activation="relu",
                   padding="same")(convm)

    # 8 -> 16
    deconv4 = Conv2DTranspose(start_neurons * 8, (3, 3),
                              strides=(2, 2),
                              padding="same")(convm)
    uconv4 = concatenate([deconv4, conv4])
    uconv4 = Dropout(0.5)(uconv4)
    uconv4 = Conv2D(start_neurons * 8, (3, 3),
                    activation="relu",
                    padding="same")(uconv4)
    uconv4 = Conv2D(start_neurons * 8, (3, 3),
                    activation="relu",
                    padding="same")(uconv4)

    # 16 -> 32
    deconv3 = Conv2DTranspose(start_neurons * 4, (3, 3),
                              strides=(2, 2),
                              padding="same")(uconv4)
    uconv3 = concatenate([deconv3, conv3])
    uconv3 = Dropout(0.5)(uconv3)
    uconv3 = Conv2D(start_neurons * 4, (3, 3),
                    activation="relu",
                    padding="same")(uconv3)
    uconv3 = Conv2D(start_neurons * 4, (3, 3),
                    activation="relu",
                    padding="same")(uconv3)

    # 32 -> 64
    deconv2 = Conv2DTranspose(start_neurons * 2, (3, 3),
                              strides=(2, 2),
                              padding="same")(uconv3)
    uconv2 = concatenate([deconv2, conv2])
    uconv2 = Dropout(0.5)(uconv2)
    uconv2 = Conv2D(start_neurons * 2, (3, 3),
                    activation="relu",
                    padding="same")(uconv2)
    uconv2 = Conv2D(start_neurons * 2, (3, 3),
                    activation="relu",
                    padding="same")(uconv2)

    # 64 -> 128
    deconv1 = Conv2DTranspose(start_neurons * 1, (3, 3),
                              strides=(2, 2),
                              padding="same")(uconv2)
    uconv1 = concatenate([deconv1, conv1])
    uconv1 = Dropout(0.5)(uconv1)
    uconv1 = Conv2D(start_neurons * 1, (3, 3),
                    activation="relu",
                    padding="same")(uconv1)
    uconv1 = Conv2D(start_neurons * 1, (3, 3),
                    activation="relu",
                    padding="same")(uconv1)

    uconv1 = Dropout(0.5)(uconv1)
    output_layer = Conv2D(1, (1, 1), padding="same",
                          activation="sigmoid")(uconv1)

    return output_layer
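# Usage sketch (shapes assumed from the "128 -> 64" comments above):
# build_model returns only the output tensor, so wrap it in a Model.
input_layer = Input((128, 128, 1))
output_layer = build_model(input_layer, start_neurons=16)
model = Model(input_layer, output_layer)
model.compile(optimizer='adam', loss='binary_crossentropy')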
Example #6
def unet_model(inL, outL):
    """
    Builds a 4-story UNET model
    :param inL: int, UNETs input image size [px]
    :param outL: int, UNETs output size [px]
    :return: model, the UNET model
    """

    d1 = 64
    d2 = 128
    d3 = 256
    d4 = 512
    d5 = 1024
    b_momentum = 0.99

    minS = inL
    for i in range(4):
        minS = (minS - 4) // 2
    minS = minS - 4
    # minS is the smallest layer size at the bottom of the U; it is 22 px for inL=476

    inS = minS
    for i in range(4):
        inS = (inS + 4) * 2
    inS = inS + 4

    upS = [inS - 4]
    for i in range(3):
        s = int(upS[-1] / 2 - 4)
        upS = np.append(upS, s)

    downS = minS
    downS = [int(downS * 2)]
    for i in range(3):
        s = int(downS[-1] - 4) * 2
        downS = np.append(downS, s)
    downS = np.flip(downS)
    lastS = downS[0] - 6
    crop = (upS - downS) / 2

    cropL = int((lastS - outL) / 2)
    cropL1 = int(crop[0])
    cropL2 = int(crop[1])
    cropL3 = int(crop[2])
    cropL4 = int(crop[3])

    inp = Input(shape=(inL, inL))
    inp1 = Reshape(target_shape=(inS, inS, 1))(inp)

    conv1 = Conv2D(d1, (3, 3),
                   padding='valid',
                   kernel_initializer='glorot_normal')(inp1)
    conv1 = BatchNormalization(momentum=b_momentum)(
        conv1)  # note: the default bs=8 might be small for BN; maybe try group norm.
    conv1 = ReLU()(conv1)
    conv1 = Conv2D(d1, (3, 3),
                   padding='valid',
                   kernel_initializer='glorot_normal')(conv1)
    conv1 = BatchNormalization(momentum=b_momentum)(conv1)
    conv1 = ReLU()(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(d2, (3, 3),
                   padding='valid',
                   kernel_initializer='glorot_normal')(pool1)
    conv2 = BatchNormalization(momentum=b_momentum)(conv2)
    conv2 = ReLU()(conv2)
    conv2 = Conv2D(d2, (3, 3),
                   padding='valid',
                   kernel_initializer='glorot_normal')(conv2)
    conv2 = BatchNormalization(momentum=b_momentum)(conv2)
    conv2 = ReLU()(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(d3, (3, 3),
                   padding='valid',
                   kernel_initializer='glorot_normal')(pool2)
    conv3 = BatchNormalization(momentum=b_momentum)(conv3)
    conv3 = ReLU()(conv3)
    conv3 = Conv2D(d3, (3, 3),
                   padding='valid',
                   kernel_initializer='glorot_normal')(conv3)
    conv3 = BatchNormalization(momentum=b_momentum)(conv3)
    conv3 = ReLU()(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Conv2D(d4, (3, 3),
                   padding='valid',
                   kernel_initializer='glorot_normal')(pool3)
    conv4 = BatchNormalization(momentum=b_momentum)(conv4)
    conv4 = ReLU()(conv4)
    conv4 = Conv2D(d4, (3, 3),
                   padding='valid',
                   kernel_initializer='glorot_normal')(conv4)
    conv4 = BatchNormalization(momentum=b_momentum)(conv4)
    conv4 = ReLU()(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    convJ = SeparableConv2D(d5, (3, 3),
                            padding='valid',
                            kernel_initializer='glorot_normal')(pool4)
    convJ = BatchNormalization(momentum=b_momentum)(convJ)
    convJ = ReLU()(convJ)
    convJ = SeparableConv2D(d5, (3, 3),
                            padding='valid',
                            kernel_initializer='glorot_normal')(convJ)
    convJ = BatchNormalization(momentum=b_momentum)(convJ)
    convJ = ReLU()(convJ)

    up5 = Conv2DTranspose(d4, (2, 2), strides=(2, 2), padding='valid')(convJ)
    crop4 = Cropping2D(cropping=(cropL4, cropL4),
                       data_format="channels_last")(conv4)
    merge5 = concatenate([crop4, up5], axis=3)
    conv5 = Conv2D(d4, (3, 3),
                   padding='valid',
                   kernel_initializer='glorot_normal')(merge5)
    conv5 = BatchNormalization(momentum=b_momentum)(conv5)
    conv5 = ReLU()(conv5)
    conv5 = Conv2D(d4, (3, 3),
                   padding='valid',
                   kernel_initializer='glorot_normal')(conv5)
    conv5 = BatchNormalization(momentum=b_momentum)(conv5)
    conv5 = ReLU()(conv5)

    up6 = Conv2DTranspose(d3, (2, 2), strides=(2, 2), padding='valid')(conv5)
    crop3 = Cropping2D(cropping=(cropL3, cropL3),
                       data_format="channels_last")(conv3)
    merge6 = concatenate([crop3, up6], axis=3)
    conv6 = Conv2D(d3, (3, 3),
                   padding='valid',
                   kernel_initializer='glorot_normal')(merge6)
    conv6 = BatchNormalization(momentum=b_momentum)(conv6)
    conv6 = ReLU()(conv6)
    conv6 = Conv2D(d3, (3, 3),
                   padding='valid',
                   kernel_initializer='glorot_normal')(conv6)
    conv6 = BatchNormalization(momentum=b_momentum)(conv6)
    conv6 = ReLU()(conv6)

    up7 = Conv2DTranspose(d2, (2, 2), strides=(2, 2), padding='valid')(conv6)
    crop2 = Cropping2D(cropping=(cropL2, cropL2),
                       data_format="channels_last")(conv2)
    merge7 = concatenate([crop2, up7], axis=3)
    conv7 = Conv2D(d2, (3, 3),
                   padding='valid',
                   kernel_initializer='glorot_normal')(merge7)
    conv7 = BatchNormalization(momentum=b_momentum)(conv7)
    conv7 = ReLU()(conv7)
    conv7 = Conv2D(d2, (3, 3),
                   padding='valid',
                   kernel_initializer='glorot_normal')(conv7)
    conv7 = BatchNormalization(momentum=b_momentum)(conv7)
    conv7 = ReLU()(conv7)

    up8 = Conv2DTranspose(d1, (2, 2), strides=(2, 2), padding='valid')(conv7)
    crop1 = Cropping2D(cropping=(cropL1, cropL1),
                       data_format="channels_last")(conv1)
    merge8 = concatenate([crop1, up8], axis=3)
    conv8 = Conv2D(d1, (3, 3),
                   padding='valid',
                   kernel_initializer='glorot_normal')(merge8)
    conv8 = BatchNormalization(momentum=b_momentum)(conv8)
    conv8 = ReLU()(conv8)
    conv8 = Conv2D(d1, (3, 3),
                   padding='valid',
                   kernel_initializer='glorot_normal')(conv8)
    conv8 = BatchNormalization(momentum=b_momentum)(conv8)
    conv8 = ReLU()(conv8)

    output = Conv2D(1, (3, 3), padding='valid')(conv8)
    output = Cropping2D(cropping=(cropL, cropL),
                        data_format="channels_last")(output)
    output = Reshape((outL, outL))(output)

    model = Model(inputs=inp, outputs=output)

    return model
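# Sanity check of the size bookkeeping above: each encoder level applies two
# valid 3x3 convs (-4 px) and a 2x2 max-pool, so for inL = 476:
s = 476
for _ in range(4):
    s = (s - 4) // 2   # 236, 116, 56, 26
s = s - 4              # 22 px at the bottom of the U, matching the comment above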
Example #7
    def build(self):
        inBlock = Input(shape=(self.input_h, self.input_w, 3), dtype='float32')
        # Lambda layer: scale input before feeding to the network
        inScaled = Lambda(lambda x: scale_input(x))(inBlock)

        # Block 1
        x = Conv2D(64, (3, 3), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(inScaled)
        x = Conv2D(64, (3, 3), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2),)(x)
        f1 = x

        # Block 2
        x = Conv2D(128, (3, 3), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(x)
        x = Conv2D(128, (3, 3), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
        f2 = x

        # Block 3
        x = Conv2D(256, (3, 3), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(x)
        x = Conv2D(256, (3, 3), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(x)
        x = Conv2D(256, (3, 3), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
        pool3 = x

        # Block 4
        x = Conv2D(256, (3, 3), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(pool3)
        x = Conv2D(256, (3, 3), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(x)
        x = Conv2D(256, (3, 3), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
        pool4 = x

        # Block 5
        x = Conv2D(512, (3, 3), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(pool4)
        x = Conv2D(512, (3, 3), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(x)
        x = Conv2D(512, (3, 3), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
        pool5 = x

        conv6 = Conv2D(2048, (7, 7), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(pool5)
        conv7 = Conv2D(2048, (1, 1), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(conv6)

        pool4_n = Conv2D(self.n_classes, (1, 1), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(pool4)
        u2 = Conv2DTranspose(self.n_classes, kernel_size=(2, 2), strides=(2, 2), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(conv7)
        # skip connection between pool_4 (after 1x1 convolution) & conv7 (upsampled 2x)
        u2_skip = Add()([pool4_n, u2])

        pool3_n = Conv2D(self.n_classes, (1, 1), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(pool3)
        u4 = Conv2DTranspose(self.n_classes, kernel_size=(2, 2), strides=(2, 2), activation=self.activation, kernel_initializer=self.kernel_init, padding='same')(u2_skip)
        # skip connection between pool_3 (after 1x1 convolution) & the previous skip sum, upsampled 2x again
        u4_skip = Add()([pool3_n, u4])

        # Output layer
        outBlock = Conv2DTranspose(self.n_classes, kernel_size=(8, 8), strides=(8, 8), padding='same', activation='softmax')(u4_skip)

        # Create model
        model = Model(inputs=inBlock, outputs=outBlock, name=self.model_name)
        model.compile(optimizer=Adam(),
                      loss="categorical_crossentropy",
                      metrics=[dice, jaccard, ]
                      )

        # Load models_weights if pre-trained
        if self.pre_trained:
            if os.path.exists(self.weights_path):
                model.load_weights(self.weights_path)
            else:
                raise Exception(f'Failed to load weights at {self.weights_path}')

        return model
Example #8
model.add(Conv2D(32, (4,4)))
model.add(BatchNormalization())
model.add(MaxPooling2D((2,2)))

model.add(Conv2D(16, (3,3)))
model.add(BatchNormalization())
model.add(MaxPooling2D((2,2)))

model.add(Flatten())
model.add(Dense(256, activation='relu', kernel_regularizer='l2'))
model.add(Dense(64, activation='relu', kernel_regularizer='l2', name='latent'))
model.add(Dense(256, activation='relu', kernel_regularizer='l2'))
model.add(Reshape((8,8,4)))

model.add(Conv2DTranspose(16, (5,5), activation='relu'))
model.add(UpSampling2D((2,2), interpolation='bilinear'))
model.add(Conv2DTranspose(32, (6,6), activation='relu'))
model.add(UpSampling2D((2,2), interpolation='bilinear'))
model.add(Conv2DTranspose(32, (3,3), activation='relu'))
model.add(UpSampling2D((2,2), interpolation='bilinear'))
model.add(Conv2DTranspose(1, (9,9), activation='sigmoid'))

model.summary()
encoder = Model(inputs=model.input, outputs=model.get_layer('latent').output)
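# get_layer('latent') taps the bottleneck Dense layer, so 'encoder' shares its
# weights with the full autoencoder and needs no separate training.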
encoder.summary()

model.compile(optimizer='adam', loss='mse')

model.fit(X_train, X_train, batch_size=200, epochs=num_epochs, validation_split=.1)
Example #9
def upsample_conv(filters, kernel_size, strides, padding):
    return Conv2DTranspose(filters,
                           kernel_size,
                           strides=strides,
                           padding=padding)
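# Usage sketch (input shape assumed): the factory mirrors Conv2DTranspose's
# signature, keeping the learned-upsampling choice swappable at call sites.
inp = Input((16, 16, 128))
up = upsample_conv(64, (2, 2), strides=(2, 2), padding='same')(inp)  # -> (32, 32, 64)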
Example #10
l = Dense(z_dim // 2, activation='relu')(l)
l = Dense(z_dim, activation='relu')(l)
lz = Multiply(name='lz')([z_h, l])
d_z = LeakyReLU(0.2)(lz)
z_en = Add()([z_in, d_z])

hidden = Model([z_in, l_in], z_en)
hidden.summary()

de_in = Input(shape=(z_dim, ))
z = de_in
z = Dense(np.prod(map_size) * z_dim, trainable=False)(z)
z = Reshape(map_size + (z_dim, ), trainable=False)(z)
z = Conv2DTranspose(z_dim // 2,
                    kernel_size=(5, 5),
                    strides=(2, 2),
                    padding='SAME',
                    trainable=False)(z)
z = BatchNormalization(trainable=False)(z)
z = Activation('relu', trainable=False)(z)
z = Conv2DTranspose(z_dim // 4,
                    kernel_size=(5, 5),
                    strides=(2, 2),
                    padding='SAME',
                    trainable=False)(z)
z = BatchNormalization(trainable=False)(z)
z = Activation('relu', trainable=False)(z)
z = Conv2DTranspose(z_dim // 8,
                    kernel_size=(5, 5),
                    strides=(2, 2),
                    padding='SAME',
                    trainable=False)(z)  # assumed continuation, mirroring the two blocks above
def create_seg_model(seg_width, seg_height):
    seg_input = Input(shape=(seg_height, seg_width, 1), name='seg_input')

    conv1 = Conv2D(64, (3, 3), padding='same', name='share_conv1_1')
    conv1_seg = conv1(seg_input)
    x = BatchNormalization()(conv1_seg)
    x = Activation('relu')(x)

    x = Conv2D(64, (3, 3), padding='same', name='block1_conv2')(x)
    x = BatchNormalization()(x)
    block_1_out = Activation('relu')(x)

    x = MaxPooling2D()(block_1_out)

    # Block 2
    x = Conv2D(128, (3, 3), padding='same', name='block2_conv1')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(128, (3, 3), padding='same', name='block2_conv2')(x)
    x = BatchNormalization()(x)
    block_2_out = Activation('relu')(x)

    x = MaxPooling2D()(block_2_out)

    # Block 3
    x = Conv2D(256, (3, 3), padding='same', name='block3_conv1')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(256, (3, 3), padding='same', name='block3_conv2')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(256, (3, 3), padding='same', name='block3_conv3')(x)
    x = BatchNormalization()(x)
    block_3_out = Activation('relu')(x)

    x = MaxPooling2D()(block_3_out)

    # Block 4
    x = Conv2D(512, (3, 3), padding='same', name='block4_conv1')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(512, (3, 3), padding='same', name='block4_conv2')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(512, (3, 3), padding='same', name='block4_conv3')(x)
    x = BatchNormalization()(x)
    block_4_out = Activation('relu')(x)

    x = MaxPooling2D()(block_4_out)

    # Block 5
    x = Conv2D(512, (3, 3), padding='same', name='block5_conv1')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(512, (3, 3), padding='same', name='block5_conv2')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(512, (3, 3), padding='same', name='block5_conv3')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # UP 1
    x = Conv2DTranspose(512, (2, 2), strides=(2, 2), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = concatenate([x, block_4_out])
    x = Conv2D(512, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(512, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # UP 2
    x = Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = concatenate([x, block_3_out])
    x = Conv2D(256, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(256, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # UP 3
    x = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = concatenate([x, block_2_out])
    x = Conv2D(128, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(128, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # UP 4
    x = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = concatenate([x, block_1_out])
    x = Conv2D(64, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(64, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # last conv
    out_seg = Conv2D(2, (3, 3), activation='softmax', padding='same', name='seg_out')(x)
    # out_seg = Activation(activation='softmax', name='seg-out')(conv10)

    model = Model(inputs=[seg_input], outputs=[out_seg])

    # layer = Layer
    # idx = 0
    # for layer in model.layers:
    #     print('{}:{}'.format(idx, layer.name))
    #     idx = idx + 1
    # plot_model(model, to_file='pair-model_2.png', show_shapes=True)

    model.summary()
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=1e-4), metrics=[dice_coef])
    return model
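# Usage sketch (size assumed): the 2-channel softmax head suits binary
# foreground/background segmentation with one-hot targets; dice_coef is
# assumed defined as in the sketch after get_unet above.
seg_model = create_seg_model(seg_width=256, seg_height=256)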
input_shape = [X_train_4ch.shape[1], X_train_4ch.shape[2], X_train_4ch.shape[3]]


###########################
#### FIRST AUTOENCODER ####

X_input = Input(input_shape)
x = Conv2D(32, (5,1), activation='relu')(X_input)
x = Conv2D(64, (7,1), activation='relu')(x)
x = Conv2D(128, (3,3), activation='relu')(x)
x = MaxPooling2D(name='encoded1')(x)
ae1_enc_shape = x.shape.as_list()
print(ae1_enc_shape)
x = UpSampling2D()(x)
x = Conv2DTranspose(64, (3,3), activation='relu')(x)
x = Conv2DTranspose(32, (7,1), activation='relu')(x)
x = Conv2DTranspose(1, (5,1))(x)

ae1 = Model(inputs=X_input, outputs=x, name='ae1')
ae1.compile(loss='mse', optimizer='rmsprop')

ae1.summary()

# train the model, if not already trained
if not Path("weights-ae1-long-gaus.h5").is_file():
    history = ae1.fit(x = X_train_noise_4ch, y = X_train_4ch,
                        epochs=general_conf['iterations'],
                        batch_size=general_conf['batch_size'],
                        callbacks=callbacks('ae1-long-gaus', True),
                        validation_data=(X_test_noise_4ch, X_test_4ch))
#layer1 = BatchNormalization()(layer1)                                              # 3
layer1 = LeakyReLU(alpha=0.3)(layer1)                                               # 4
#layer1 = Dropout(0.2)(layer1)                                                      # 5
# 9x12 to 7x10
layer2 = Conv2D(filters=64, kernel_size=3, strides=1, padding='valid')(layer1)     # 6
#layer2 = BatchNormalization()(layer2)                                              # 7
layer2 = LeakyReLU(alpha=0.3)(layer2)                                               # 8
#layer2 = Dropout(0.2)(layer2)                                                      # 9
# print(layer2.get_shape().as_list())
layer3 = Flatten()(layer2)                                                          # 10
layer3 = Dense(7*10*64)(layer3)                                                     # 11
#layer3 = BatchNormalization()(layer3)                                              # 12
layer3 = LeakyReLU(alpha=0.3)(layer3)                                               # 13
#layer3 = Dropout(0.2)(layer3)                                                      # 14
layer3 = Reshape((7, 10, 64))(layer3)                                               # 15
layer4 = Conv2DTranspose(filters=32, kernel_size=3, strides=1, padding='valid')(layer3)     # 16
#layer4 = BatchNormalization()(layer4)                                              # 17
layer4 = LeakyReLU(alpha=0.3)(layer4)                                               # 18
#layer4 = Dropout(0.2)(layer4)                                                      # 19
layer4 = Conv2DTranspose(filters=2, kernel_size=(4, 5), strides=2,
                         padding='valid', activation='linear')(layer4)              # 20
# add sc input
layer4 = Flatten()(layer4)
sc_input = Input(shape=(1,), dtype='int32')
sc_layer = Embedding(12, 64, input_length=1)(sc_input)
sc_layer = Flatten()(sc_layer)
sc_layer = Dense(20*27*2)(sc_layer)
# sc_layer = BatchNormalization()(sc_layer)  # ?
sc_layer = LeakyReLU(alpha=0.3)(sc_layer)  # ?
#sc_layer = Dropout(0.2)(sc_layer)
layer5 = concatenate([layer4, sc_layer])
Example #14
def main():

	x_test = read_data(sys.argv[1]+'/test')
	y_label = read_label(sys.argv[1]+'/test.csv')
	x_test = x_test.reshape((x_test.shape[0],) + original_img_size)

	"""
	Bulid Model
	"""

	#####################
	#		Encoder		#
	#####################

	x = Input(shape=original_img_size)
	conv_1 = Conv2D(img_chns,
					kernel_size=(2, 2),
					padding='same', activation='relu')(x)
	conv_2 = Conv2D(filters,
					kernel_size=(2, 2),
					padding='same', activation='relu',
					strides=(2, 2))(conv_1)
	conv_3 = Conv2D(filters,
					kernel_size=num_conv,
					padding='same', activation='relu',
					strides=1)(conv_2)
	conv_4 = Conv2D(filters,
					kernel_size=num_conv,
					padding='same', activation='relu',
					strides=1)(conv_3)
	
	flat = Flatten()(conv_4)

	z_mean = Dense(latent_dim)(flat)
	z_log_var = Dense(latent_dim)(flat)


	np.random.seed(0)
	def sampling(args):
		z_mean, z_log_var = args
		epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),
								  mean=0., stddev=epsilon_std)
		return z_mean + K.exp(z_log_var/2) * epsilon

	z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])
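	# Reparameterization trick: z = mean + exp(log_var / 2) * eps keeps the
	# sampling step differentiable with respect to z_mean and z_log_var.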

	#####################
	#		Decoder		#
	#####################

	decoder_upsample = Dense(filters * 32 * 32, activation='relu')

	output_shape = (batch_size, 32, 32, filters)

	decoder_reshape = Reshape(output_shape[1:])
	decoder_deconv_1 = Conv2DTranspose(filters,
									   kernel_size=num_conv,
									   padding='same',
									   strides=1,
									   activation='relu')
	decoder_deconv_2 = Conv2DTranspose(filters,
									   kernel_size=num_conv,
									   padding='same',
									   strides=1,
									   activation='relu')

	decoder_deconv_3_upsamp = Conv2DTranspose(filters,
											  kernel_size=(3, 3),
											  strides=(2, 2),
											  padding='valid',
											  activation='relu')
	decoder_mean_squash = Conv2D(img_chns,
								 kernel_size=2,
								 padding='valid',
								 activation='sigmoid')

	up_decoded = decoder_upsample(z)
	reshape_decoded = decoder_reshape(up_decoded)
	deconv_1_decoded = decoder_deconv_1(reshape_decoded)
	deconv_2_decoded = decoder_deconv_2(deconv_1_decoded)
	x_decoded_relu = decoder_deconv_3_upsamp(deconv_2_decoded)
	x_decoded_mean_squash = decoder_mean_squash(x_decoded_relu)

	#####################
	#		Loss		#
	#####################

	lambda_kl = 3e-5
	mse_loss = K.mean(K.square(K.flatten(x) - K.flatten(x_decoded_mean_squash)))
	kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
	vae_loss = mse_loss + kl_loss * lambda_kl 
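	# vae_loss is a lambda-weighted negative ELBO: pixel-wise MSE as the
	# reconstruction term plus the closed-form KL(q(z|x) || N(0, I)).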

	def loss1(args):
		x, x_decoded_mean_squash = args
		mse_loss = K.mean(K.square(K.flatten(x) - K.flatten(x_decoded_mean_squash)))
		return mse_loss

	def loss2(args):
		x, x_decoded_mean_squash = args
		kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
		return kl_loss

	loss1 = Lambda(loss1)([x, x_decoded_mean_squash])
	loss2 = Lambda(loss2)([x, x_decoded_mean_squash])

	vae = Model(x, [x_decoded_mean_squash, loss1, loss2])
	
	vae.add_loss(vae_loss)

	#vae.summary()

	vae.load_weights('./vae_weights.h5')

	#############################
	#		Reconstruct 		#
	#############################

	y_test = vae.predict(x_test)[0]
	for i in range(1,11):
		plt.subplot(2, 10, i)
		plt.imshow(x_test[i])
		plt.axis('off')
		plt.subplot(2, 10, i + 10)
		plt.imshow(y_test[i])
		plt.axis('off')
	plt.savefig(sys.argv[2]+'/figure1_3.jpg')
	plt.close()

	print('reconstruct.png saved')

	#############################
	#		Generated_pics		#
	#############################

	decoder_input = Input(shape=(latent_dim,))
	_up_decoded = decoder_upsample(decoder_input)
	_reshape_decoded = decoder_reshape(_up_decoded)
	_deconv_1_decoded = decoder_deconv_1(_reshape_decoded)
	_deconv_2_decoded = decoder_deconv_2(_deconv_1_decoded)
	_x_decoded_relu = decoder_deconv_3_upsamp(_deconv_2_decoded)
	_x_decoded_mean_squash = decoder_mean_squash(_x_decoded_relu)
	generator = Model(decoder_input, _x_decoded_mean_squash)

	np.random.seed(87)
	random_input = np.random.random((32, latent_dim))
	generated_pics = generator.predict(random_input)
	generated_pics = generated_pics.reshape(-1, 64, 64, 3)

	for i in range(1,33):
		plt.subplot(4, 8, i)
		plt.imshow(generated_pics[i-1])
		plt.axis('off')
	plt.savefig(sys.argv[2]+'/figure1_4.jpg')
	plt.close()

	print('generated_pics.png saved')

	#############################
	#		TSNE_pics			#
	#############################

	x = Input(shape=original_img_size)
	conv_1 = Conv2D(img_chns,
					kernel_size=(2, 2),
					padding='same', activation='relu')(x)
	conv_2 = Conv2D(filters,
					kernel_size=(2, 2),
					padding='same', activation='relu',
					strides=(2, 2))(conv_1)
	conv_3 = Conv2D(filters,
					kernel_size=num_conv,
					padding='same', activation='relu',
					strides=1)(conv_2)
	conv_4 = Conv2D(filters,
					kernel_size=num_conv,
					padding='same', activation='relu',
					strides=1)(conv_3)
	
	flat = Flatten()(conv_4)

	z_mean = Dense(latent_dim)(flat)
	z_log_var = Dense(latent_dim)(flat)
	z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])
	Encoder = Model(x, z)
	latent = Encoder.predict(x_test)

	y_label = y_label[:, 12]
	x_tsne = TSNE(n_components=2,learning_rate=10).fit_transform(latent)

	plt.scatter(x_tsne[:, 0], x_tsne[:, 1], c=y_label, s=5)
	plt.xlim(-10, 10)
	plt.ylim(-10, 10)
	plt.savefig(sys.argv[2]+'/figure1_5.jpg')

	print('tSNE.png saved')
Example #15
inputs = Input(input_shape)
conv1 = Conv2D(16, kernel_size=(3, 3), padding='same',
               activation='relu')(inputs)
conv2 = Conv2D(32, kernel_size=(3, 3), padding='same',
               activation='relu')(conv1)
conv3 = Conv2D(64, kernel_size=(3, 3), padding='same',
               activation='relu')(conv2)
f = Flatten()(conv3)
d1 = Dense(128, activation='relu')(f)
d2 = Dense(256, activation='relu')(d1)
d3 = Dense(512, activation='relu')(d2)
d4 = Dense(32 * 32 * 30, activation='relu')(d3)
rs = Reshape((32, 32, 30))(d4)
up = UpSampling2D((3, 3))(rs)
tpconv1 = Conv2DTranspose(64,
                          kernel_size=(3, 3),
                          padding='same',
                          activation='relu')(up)
tpconv2 = Conv2DTranspose(32,
                          kernel_size=(3, 3),
                          padding='same',
                          activation='relu')(tpconv1)
tpconv3 = Conv2DTranspose(16,
                          kernel_size=(3, 3),
                          padding='same',
                          activation='relu')(tpconv2)
out = Conv2DTranspose(3, kernel_size=(3, 3), padding='same',
                      activation='relu')(tpconv3)

model = Model(inputs, out, name='Enlarger')
model.summary()
model.compile(loss='mse', optimizer='adam')  # optimizer not shown in the original excerpt; 'adam' assumed
Example #16
    def compose_model(self):
        # Initial Block
        inp = Input(shape=self.vars.INP_SHAPE)
        x = Conv2D(filters=13,
                   kernel_size=(3, 3),
                   strides=(2, 2),
                   padding='same')(inp)
        side = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(inp)
        x = Concatenate()([x, side])
        x = BatchNormalization()(x)
        x = PReLU(shared_axes=[1, 2])(x)

        # block 1
        x = self.RDDNeck(x, 64, True, dilation=1, keep_probs=0.01)
        x = self.RDDNeck(x, 64, False, dilation=1, keep_probs=0.01)
        x = self.RDDNeck(x, 64, False, dilation=1, keep_probs=0.01)
        x = self.RDDNeck(x, 64, False, dilation=1, keep_probs=0.01)
        x = self.RDDNeck(x, 64, False, dilation=1, keep_probs=0.01)

        #block 2
        x = self.RDDNeck(x, 128, True, dilation=1)
        x = self.RDDNeck(x, 128, False, dilation=1)
        x = self.RDDNeck(x, 128, False, dilation=2)
        x = self.ASNeck(x, 128)
        x = self.RDDNeck(x, 128, False, dilation=4)
        x = self.RDDNeck(x, 128, False, dilation=1)
        x = self.RDDNeck(x, 128, False, dilation=8)
        x = self.ASNeck(x, 128)
        x = self.RDDNeck(x, 128, False, dilation=16)

        #block 3
        x = self.RDDNeck(x, 128, False, dilation=1)
        x = self.RDDNeck(x, 128, False, dilation=2)
        x = self.ASNeck(x, 128)
        x = self.RDDNeck(x, 128, False, dilation=4)
        x = self.RDDNeck(x, 128, False, dilation=1)
        x = self.RDDNeck(x, 128, False, dilation=8)
        x = self.ASNeck(x, 128)
        x = self.RDDNeck(x, 128, False, dilation=16)

        # block 4
        x = self.RDDNeck(x, 256, False, dilation=1)
        x = self.RDDNeck(x, 256, False, dilation=2)
        x = self.ASNeck(x, 256)
        x = self.RDDNeck(x, 256, False, dilation=4)
        x = self.RDDNeck(x, 256, False, dilation=1)
        x = self.RDDNeck(x, 256, False, dilation=8)
        x = self.ASNeck(x, 256)
        x = self.RDDNeck(x, 256, False, dilation=16)

        #block 4
        x = self.UBNeck(x, 64)
        x = self.RDDNeck(x, 64, False, dilation=1)
        x = self.RDDNeck(x, 64, False, dilation=1)

        #block 5
        x = self.UBNeck(x, 16)
        x = self.RDDNeck(x, 16, False, dilation=1)

        out = Conv2DTranspose(filters=self.vars.LOGO_NUM_CLASSES,
                              kernel_size=(3, 3),
                              strides=(2, 2),
                              use_bias=False,
                              output_padding=1,
                              padding='same')(x)
        out = Reshape((self.vars.INP_SHAPE[0] * self.vars.INP_SHAPE[1],
                       self.vars.LOGO_NUM_CLASSES))(out)
        out = Activation('softmax')(out)

        model = Model(inputs=inp, outputs=out)
        model.compile(loss='categorical_crossentropy', optimizer=Adam())

        return model
def unet_model(n_classes=5,
               im_sz=160,
               n_channels=8,
               n_filters_start=32,
               growth_factor=2,
               upconv=True,
               class_weights=[0.2, 0.3, 0.1, 0.1, 0.3]):
    droprate = 0.25
    n_filters = n_filters_start
    inputs = Input((im_sz, im_sz, n_channels))
    #inputs = BatchNormalization()(inputs)
    conv1 = Conv2D(n_filters, (3, 3), activation='relu',
                   padding='same')(inputs)
    conv1 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    #pool1 = Dropout(droprate)(pool1)

    n_filters *= growth_factor
    pool1 = BatchNormalization()(pool1)
    conv2 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(pool1)
    conv2 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    pool2 = Dropout(droprate)(pool2)

    n_filters *= growth_factor
    pool2 = BatchNormalization()(pool2)
    conv3 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(pool2)
    conv3 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    pool3 = Dropout(droprate)(pool3)

    n_filters *= growth_factor
    pool3 = BatchNormalization()(pool3)
    conv4_0 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(pool3)
    conv4_0 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(conv4_0)
    pool4_1 = MaxPooling2D(pool_size=(2, 2))(conv4_0)
    pool4_1 = Dropout(droprate)(pool4_1)

    n_filters *= growth_factor
    pool4_1 = BatchNormalization()(pool4_1)
    conv4_1 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(pool4_1)
    conv4_1 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(conv4_1)
    pool4_2 = MaxPooling2D(pool_size=(2, 2))(conv4_1)
    pool4_2 = Dropout(droprate)(pool4_2)

    n_filters *= growth_factor
    conv5 = Conv2D(n_filters, (3, 3), activation='relu',
                   padding='same')(pool4_2)
    conv5 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv5)

    n_filters //= growth_factor
    if upconv:
        up6_1 = concatenate([
            Conv2DTranspose(n_filters, (2, 2), strides=(2, 2),
                            padding='same')(conv5), conv4_1
        ])
    else:
        up6_1 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4_1])
    up6_1 = BatchNormalization()(up6_1)
    conv6_1 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(up6_1)
    conv6_1 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(conv6_1)
    conv6_1 = Dropout(droprate)(conv6_1)

    n_filters //= growth_factor
    if upconv:
        up6_2 = concatenate([
            Conv2DTranspose(n_filters, (2, 2), strides=(2, 2),
                            padding='same')(conv6_1), conv4_0
        ])
    else:
        up6_2 = concatenate([UpSampling2D(size=(2, 2))(conv6_1), conv4_0])
    up6_2 = BatchNormalization()(up6_2)
    conv6_2 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(up6_2)
    conv6_2 = Conv2D(n_filters, (3, 3), activation='relu',
                     padding='same')(conv6_2)
    conv6_2 = Dropout(droprate)(conv6_2)

    n_filters //= growth_factor
    if upconv:
        up7 = concatenate([
            Conv2DTranspose(n_filters, (2, 2), strides=(2, 2),
                            padding='same')(conv6_2), conv3
        ])
    else:
        up7 = concatenate([UpSampling2D(size=(2, 2))(conv6_2), conv3])
    up7 = BatchNormalization()(up7)
    conv7 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(up7)
    conv7 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv7)
    conv7 = Dropout(droprate)(conv7)

    n_filters //= growth_factor
    if upconv:
        up8 = concatenate([
            Conv2DTranspose(n_filters, (2, 2), strides=(2, 2),
                            padding='same')(conv7), conv2
        ])
    else:
        up8 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2])
    up8 = BatchNormalization()(up8)
    conv8 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(up8)
    conv8 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv8)
    conv8 = Dropout(droprate)(conv8)

    n_filters //= growth_factor
    if upconv:
        up9 = concatenate([
            Conv2DTranspose(n_filters, (2, 2), strides=(2, 2),
                            padding='same')(conv8), conv1
        ])
    else:
        up9 = concatenate([UpSampling2D(size=(2, 2))(conv8), conv1])
    conv9 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(up9)
    conv9 = Conv2D(n_filters, (3, 3), activation='relu', padding='same')(conv9)

    conv10 = Conv2D(n_classes, (1, 1), activation='sigmoid')(conv9)

    model = Model(inputs=inputs, outputs=conv10)

    def weighted_binary_crossentropy(y_true, y_pred):
        class_loglosses = K.mean(K.binary_crossentropy(y_true, y_pred),
                                 axis=[0, 1, 2])
        return K.sum(class_loglosses * K.constant(class_weights))

    model.compile(optimizer=Adam(),
                  loss=weighted_binary_crossentropy,
                  metrics=['mae', 'acc'])
    return model
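# Usage sketch (defaults from the signature above). The custom loss averages
# binary cross-entropy per class map over batch and space, then applies the
# per-class weights before summing.
model = unet_model(n_classes=5, im_sz=160, n_channels=8, upconv=True)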
Example #18
    def _change_judge_layer(self, inputs, diff_fea_1, diff_fea_2, diff_fea_3,
                            diff_fea_4):
        # (B, H/16, W/16, 128) --> (B, H/8, W/8, 64)
        layer_1 = Conv2DTranspose(128,
                                  2,
                                  activation='relu',
                                  padding='same',
                                  kernel_initializer='he_normal')(
                                      UpSampling2D(size=(2, 2))(inputs))

        # attention_1 = self.Attention_layer(layer_1)
        #  diff_fea_4 = Multiply()([attention_1, diff_fea_4])
        concat_layer_1 = Concatenate()([layer_1, diff_fea_4])

        # layer_1 = Conv2D(128, 3, strides=[1, 1], activation='relu', padding='same', kernel_initializer='he_normal')(
        #     concat_layer_1)

        layer_1 = Conv2D(128,
                         3,
                         strides=[1, 1],
                         activation='relu',
                         padding='same',
                         kernel_initializer='he_normal')(concat_layer_1)
        # layer_1 = BatchNormalization()(layer_1)
        layer_1 = Dropout(0.5)(layer_1)
        layer_1 = Conv2D(64,
                         3,
                         strides=[1, 1],
                         activation='relu',
                         padding='same',
                         kernel_initializer='he_normal')(layer_1)

        # (B, H/8, W/8, 64) --> (B, H/4, W/4, 32)
        layer_2 = Conv2DTranspose(64,
                                  2,
                                  strides=[1, 1],
                                  activation='relu',
                                  padding='same',
                                  kernel_initializer='he_normal')(
                                      UpSampling2D(size=(2, 2))(layer_1))

        # attention_2 = self.Attention_layer(layer_2)
        # diff_fea_3 = Multiply()([attention_2, diff_fea_3])
        concat_layer_2 = Concatenate()([layer_2, diff_fea_3])

        # layer_2 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(
        #     concat_layer_2)
        layer_2 = Conv2D(64,
                         3,
                         activation='relu',
                         padding='same',
                         kernel_initializer='he_normal')(concat_layer_2)
        # layer_2 = BatchNormalization()(layer_2)
        layer_2 = Conv2D(32,
                         3,
                         activation='relu',
                         padding='same',
                         kernel_initializer='he_normal')(layer_2)
        drop_layer_2 = Dropout(0.4)(layer_2)
        # (B, H/4, W/4, 32) --> (B, H/2, W/2, 16)
        layer_3 = Conv2DTranspose(32,
                                  2,
                                  activation='relu',
                                  padding='same',
                                  kernel_initializer='he_normal')(
                                      UpSampling2D(size=(2, 2))(drop_layer_2))

        # attention_3 = self.Attention_layer(layer_3)
        # diff_fea_2 = Multiply()([attention_3, diff_fea_2])
        concat_layer_3 = Concatenate()([layer_3, diff_fea_2])

        layer_3 = Conv2D(32,
                         3,
                         activation='relu',
                         padding='same',
                         kernel_initializer='he_normal')(concat_layer_3)
        # layer_3 = BatchNormalization()(layer_3)
        layer_3 = Conv2D(16,
                         3,
                         activation='relu',
                         padding='same',
                         kernel_initializer='he_normal')(layer_3)
        drop_layer_3 = Dropout(0.3)(layer_3)
        # (B, H/2, W/2, 16) --> (B, H, W, 1)
        layer_4 = Conv2DTranspose(16,
                                  2,
                                  activation='relu',
                                  padding='same',
                                  kernel_initializer='he_normal')(
                                      UpSampling2D(size=(2, 2))(drop_layer_3))

        # attention_4 = self.Attention_layer(layer_4)
        # diff_fea_1 = Multiply()([attention_4, diff_fea_1])
        concat_layer_4 = Concatenate()([layer_4, diff_fea_1])
        # drop_layer_4 = Dropout(0.2)(concat_layer_4)
        # layer_4 = Conv2D(32, 3, activation='relu', padding='same', kernel_initializer='he_normal')(
        #     concat_layer_4)
        # layer_3 = BatchNormalization()(layer_3)
        layer_4 = Conv2D(16,
                         3,
                         activation='relu',
                         padding='same',
                         kernel_initializer='he_normal')(concat_layer_4)
        logits = Conv2D(1,
                        3,
                        activation='sigmoid',
                        padding='same',
                        kernel_initializer='he_normal')(layer_4)
        logits = Lambda(self.squeeze)(logits)
        return logits