Пример #1
0
def UResNet34(input_shape=(256, 256, 1),
              classes=1,
              decoder_filters=32,
              decoder_block_type='upsampling',
              encoder_weights="imagenet",
              input_tensor=None,
              activation='sigmoid',
              **kwargs):
    """Build and compile a U-Net style segmentation model on a ResNet-34 encoder.

    Parameters
    ----------
    input_shape : tuple
        ``(height, width, channels)`` of the model input.
    input_tensor : tensor, optional
        Existing tensor to use as the model input instead of creating one.
    classes, decoder_filters, decoder_block_type, encoder_weights,
    activation, **kwargs
        Accepted for signature compatibility with the other U-Net builders
        in this project but currently UNUSED: the decoder and head come
        from ``build_model`` with hard-coded settings (32 start filters,
        0.6 dropout). TODO(review): wire these through if callers rely
        on them.

    Returns
    -------
    A compiled Keras ``Model`` named ``'u-resnet34'`` with binary
    cross-entropy loss and the ``my_iou_metric`` metric.
    """
    backbone = ResnetBuilder.build_resnet_34(input_shape=input_shape,
                                             input_tensor=input_tensor)

    input_layer = backbone.input
    # Decoder/head built by the shared helper: 32 start filters, 0.6 dropout.
    output_layer = build_model(input_layer, 32, 0.6)
    model = Model(input_layer, output_layer)
    # Use the Adam class directly: the lowercase `optimizers.adam` alias
    # only exists in legacy Keras releases; `Adam` works everywhere.
    optimizer = optimizers.Adam(lr=0.01)
    model.compile(loss='binary_crossentropy',  # alternative tried: bce_dice_loss
                  optimizer=optimizer,
                  metrics=[my_iou_metric])
    model.name = 'u-resnet34'
    return model
# Run the (externally defined) core generator on the GAN input tensor.
gan_output = core_generator.model(gan_x)

# Extract features and predictions from each discriminator.
# Every discriminator sees the original input concatenated with the
# generator output along the channel axis.
disc_input = concatenate([gan_x, gan_output], axis=-1)
pred_full, features_full = discriminator_full.model(disc_input)
pred_medium, features_medium = discriminator_medium.model(disc_input)
pred_low, features_low = discriminator_low.model(disc_input)

# Compile the combined GAN network: one model exposing the generated
# image, the three discriminator feature maps (for feature matching),
# and the three discriminator predictions.
gan_core = Model(inputs=gan_x,
                 outputs=[
                     gan_output, features_full, features_medium, features_low,
                     pred_full, pred_medium, pred_low
                 ])

gan_core.name = "gan_core"
# Second positional argument is beta_1=0.5, a common GAN choice.
optimizer = Adam(learning_rate, 0.5, decay=decay_rate)
# L1 on the generated image and on discriminator features (feature
# matching); MSE on the discriminator predictions (LSGAN-style).
loss_gan = ['mae', 'mae', 'mae', 'mae', 'mse', 'mse', 'mse']
# Feature-matching terms weighted 3.33 each; adversarial prediction
# terms down-weighted to 0.33 each.
loss_weights_gan = [1, 3.33, 3.33, 3.33, 0.33, 0.33, 0.33]

# Optional multi-GPU wrapper (disabled):
# gan_core = multi_gpu_model(gan_core_org)
gan_core.compile(optimizer=optimizer,
                 loss_weights=loss_weights_gan,
                 loss=loss_gan)

# --------------------------------
#  Compile the discriminators
# --------------------------------

discriminator_full.model.trainable = True
discriminator_medium.model.trainable = True
Пример #3
0
def UEfficientNet(input_shape=(None, None, 3), dropout_rate=0.1):
    """U-Net based segmentation model with an EfficientNet-B4 encoder.

    Parameters
    ----------
    input_shape : tuple
        ``(height, width, channels)``; spatial dims may be ``None``.
    dropout_rate : float
        Dropout applied at each decoder stage (half of it before the
        output head). Previously some stages hard-coded 0.1; now the
        parameter is applied consistently (identical behavior at the
        default ``dropout_rate=0.1``).

    Returns
    -------
    Keras ``Model`` mapping the input image to a single-channel
    sigmoid mask.
    """
    backbone = EfficientNetB4(weights='imagenet',
                              include_top=False,
                              input_shape=input_shape)
    # Renamed from `input` to avoid shadowing the builtin.
    inputs = backbone.input
    start_neurons = 8

    # Encoder taps used as skip connections. NOTE(review): the layer
    # indices 342/154/92/30 presumably mark EfficientNet-B4 block
    # boundaries at successive spatial resolutions — confirm against the
    # installed efficientnet package version.
    conv4 = backbone.layers[342].output
    conv4 = LeakyReLU(alpha=0.1)(conv4)
    pool4 = MaxPooling2D((2, 2))(conv4)
    pool4 = Dropout(dropout_rate)(pool4)

    # Middle (bottleneck)
    convm = Conv2D(start_neurons * 32, (3, 3),
                   activation=None,
                   padding="same",
                   name='conv_middle')(pool4)
    convm = residual_block(convm, start_neurons * 32)
    convm = residual_block(convm, start_neurons * 32)
    convm = LeakyReLU(alpha=0.1)(convm)

    # Decoder stage 4. The extra *_up tensors are the same feature map
    # upsampled further; they feed the shallower stages below as dense
    # skip connections.
    deconv4 = Conv2DTranspose(start_neurons * 16, (3, 3),
                              strides=(2, 2),
                              padding="same")(convm)
    deconv4_up1 = Conv2DTranspose(start_neurons * 16, (3, 3),
                                  strides=(2, 2),
                                  padding="same")(deconv4)
    deconv4_up2 = Conv2DTranspose(start_neurons * 16, (3, 3),
                                  strides=(2, 2),
                                  padding="same")(deconv4_up1)
    deconv4_up3 = Conv2DTranspose(start_neurons * 16, (3, 3),
                                  strides=(2, 2),
                                  padding="same")(deconv4_up2)
    uconv4 = concatenate([deconv4, conv4])
    uconv4 = Dropout(dropout_rate)(uconv4)

    uconv4 = Conv2D(start_neurons * 16, (3, 3),
                    activation=None,
                    padding="same")(uconv4)
    uconv4 = residual_block(uconv4, start_neurons * 16)
    uconv4 = LeakyReLU(alpha=0.1)(uconv4)

    # Decoder stage 3
    deconv3 = Conv2DTranspose(start_neurons * 8, (3, 3),
                              strides=(2, 2),
                              padding="same")(uconv4)
    deconv3_up1 = Conv2DTranspose(start_neurons * 8, (3, 3),
                                  strides=(2, 2),
                                  padding="same")(deconv3)
    deconv3_up2 = Conv2DTranspose(start_neurons * 8, (3, 3),
                                  strides=(2, 2),
                                  padding="same")(deconv3_up1)
    conv3 = backbone.layers[154].output
    uconv3 = concatenate([deconv3, deconv4_up1, conv3])
    uconv3 = Dropout(dropout_rate)(uconv3)

    uconv3 = Conv2D(start_neurons * 8, (3, 3), activation=None,
                    padding="same")(uconv3)
    uconv3 = residual_block(uconv3, start_neurons * 8)
    uconv3 = LeakyReLU(alpha=0.1)(uconv3)

    # Decoder stage 2
    deconv2 = Conv2DTranspose(start_neurons * 4, (3, 3),
                              strides=(2, 2),
                              padding="same")(uconv3)
    deconv2_up1 = Conv2DTranspose(start_neurons * 4, (3, 3),
                                  strides=(2, 2),
                                  padding="same")(deconv2)
    conv2 = backbone.layers[92].output
    uconv2 = concatenate([deconv2, deconv3_up1, deconv4_up2, conv2])

    uconv2 = Dropout(dropout_rate)(uconv2)  # was hard-coded 0.1
    uconv2 = Conv2D(start_neurons * 4, (3, 3), activation=None,
                    padding="same")(uconv2)
    uconv2 = residual_block(uconv2, start_neurons * 4)
    uconv2 = LeakyReLU(alpha=0.1)(uconv2)

    # Decoder stage 1
    deconv1 = Conv2DTranspose(start_neurons * 2, (3, 3),
                              strides=(2, 2),
                              padding="same")(uconv2)
    conv1 = backbone.layers[30].output
    uconv1 = concatenate(
        [deconv1, deconv2_up1, deconv3_up2, deconv4_up3, conv1])

    uconv1 = Dropout(dropout_rate)(uconv1)  # was hard-coded 0.1
    uconv1 = Conv2D(start_neurons * 2, (3, 3), activation=None,
                    padding="same")(uconv1)
    uconv1 = residual_block(uconv1, start_neurons * 2)
    uconv1 = LeakyReLU(alpha=0.1)(uconv1)

    # Decoder stage 0 — back to full input resolution.
    uconv0 = Conv2DTranspose(start_neurons * 1, (3, 3),
                             strides=(2, 2),
                             padding="same")(uconv1)
    uconv0 = Dropout(dropout_rate)(uconv0)  # was hard-coded 0.1
    uconv0 = Conv2D(start_neurons * 1, (3, 3), activation=None,
                    padding="same")(uconv0)
    uconv0 = residual_block(uconv0, start_neurons * 1)
    uconv0 = LeakyReLU(alpha=0.1)(uconv0)

    uconv0 = Dropout(dropout_rate / 2)(uconv0)
    output_layer = Conv2D(1, (1, 1), padding="same",
                          activation="sigmoid")(uconv0)

    model = Model(inputs, output_layer)
    # NOTE(review): name says 'u-xception' but the encoder is
    # EfficientNet-B4 — kept unchanged in case callers key off the name.
    model.name = 'u-xception'

    return model
Пример #4
0
    def __init__(self,
                 width=256,
                 height=256,
                 channels=1,
                 gpus = 0):
        """Build the core generator: a U-Net with spectrally normalized
        convolutions and a self-attention layer on the decoder path.

        Architecture adapted from Erik Linder-Noren's pix2pix model
        (https://github.com/eriklindernoren/Keras-GAN/blob/master/pix2pix/pix2pix.py),
        thinned to handle 128x128 images, with Spectral Normalization
        and an Attention layer added.
        """
        self.width = width
        self.height = height
        self.channels = channels
        self.gpus = gpus
        self.gf = 64  # base number of generator filters

        def down_block(x, n_filters, kernel=4):
            """Downsampling step: strided spectral-norm conv + LeakyReLU."""
            y = ConvSN2D(n_filters, kernel_size=kernel, strides=2, padding='same')(x)
            return LeakyReLU(alpha=0.2)(y)

        def up_block(x, skip, n_filters, kernel=4, dropout_rate=0):
            """Upsampling step: resize, spectral-norm conv, optional dropout,
            then concatenate with the matching encoder feature map."""
            y = UpSampling2D(size=2)(x)
            y = ConvSN2D(n_filters, kernel_size=kernel, strides=1, padding='same', activation='relu')(y)
            if dropout_rate:
                y = Dropout(dropout_rate)(y)
            return Concatenate()([y, skip])

        # Input image
        net_in = Input(shape=(width, height, channels))

        # Encoder: six strided convolutions; keep every stage for the
        # decoder's skip connections (skips[0] is the input itself).
        skips = [net_in]
        for mult in (2, 4, 8, 8, 8, 8):
            skips.append(down_block(skips[-1], self.gf * mult))

        # Decoder with skip connections, deepest stage first.
        x = up_block(skips[6], skips[5], self.gf * 8)
        x = up_block(x, skips[4], self.gf * 8)
        x = up_block(x, skips[3], self.gf * 8)
        x = up_block(x, skips[2], self.gf * 4)
        x = Attention(512)(x)  # self-attention before the last skip stage
        x = up_block(x, skips[1], self.gf * 2)

        x = UpSampling2D(size=2)(x)
        # Two-channel tanh output via a spectrally normalized 7x7 conv.
        output = ConvSN2D(2, kernel_size=(7,7), strides=1, padding='same', activation='tanh')(x)

        core_generator = Model(net_in, output)
        core_generator.name = "core_generator"

        # Keep a single-GPU handle for saving; wrap in the multi-GPU
        # helper only when more than one GPU was requested.
        if self.gpus < 2:
            self.model = core_generator
            self.save_model = self.model
        else:
            self.save_model = core_generator
            self.model = multi_gpu_model(self.save_model, gpus=gpus)