Example #1
def conv_bn_relu(image_in, mask_in, filters, kernel_size, 
                 downsampling=1, upsampling=1, act="relu",
                 concat_img=None, concat_mask=None, reps=1):
    assert not (concat_img is None) ^ (concat_mask is None)  # pass both or neither (XOR must be False)
    # Upsampling case
    if upsampling > 1:
        conv = layers.Lambda(upsampling2d_tpu, arguments={"scale":upsampling})(image_in)
        mask = layers.Lambda(upsampling2d_tpu, arguments={"scale":upsampling})(mask_in)
    else:
        conv, mask = image_in, mask_in
    if concat_img is not None and concat_mask is not None:
        conv = layers.Concatenate()([conv, concat_img])
        mask = layers.Concatenate()([mask, concat_mask])
        # Insert a 1x1 conv to reduce computation
        conv, mask = PConv2D(filters=filters, kernel_size=1)([conv, mask])
        conv = layers.BatchNormalization()(conv)
        conv = layers.Activation("relu")(conv)

    for i in range(reps):
        stride = downsampling if i == 0 else 1
        # Downsample via the stride
        conv, mask = PConv2D(filters=filters, kernel_size=kernel_size, 
                             padding="same", strides=stride)([conv, mask])
        # Apply BN -> activation on the image branch only
        conv = layers.BatchNormalization()(conv)
        if act == "relu":
            conv = layers.Activation("relu")(conv)
        elif act == "prelu":
            conv = layers.PReLU()(conv)
        elif act == "custom_tanh":
            # Turning white in the source image into black needs twice the tanh range, [-2, 2]
            conv = layers.Lambda(lambda x: 2*K.tanh(x), name="unmasked")(conv)
    return conv, mask
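
A minimal usage sketch of conv_bn_relu with hypothetical shapes (it assumes the keras layers module, backend K, the PConv2D layer and the upsampling2d_tpu helper used above are in scope): two downsampling blocks, then two upsampling blocks wired to the matching skip connections.

img_in = layers.Input((256, 256, 3))
mask_in = layers.Input((256, 256, 3))
# Encoder: stride-2 downsampling on the first rep of each block
e1, m1 = conv_bn_relu(img_in, mask_in, filters=64, kernel_size=3,
                      downsampling=2, reps=2)
e2, m2 = conv_bn_relu(e1, m1, filters=128, kernel_size=3,
                      downsampling=2, reps=2)
# Decoder: upsample first, then concatenate the encoder skip tensors
d1, dm1 = conv_bn_relu(e2, m2, filters=64, kernel_size=3, upsampling=2,
                       concat_img=e1, concat_mask=m1, reps=2)
out, _ = conv_bn_relu(d1, dm1, filters=3, kernel_size=3, upsampling=2,
                      act="custom_tanh", concat_img=img_in, concat_mask=mask_in)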
Example #2
def encoder_layer(img_in, mask_in, filters, kernel_size, bn=True):
    conv, mask = PConv2D(filters, kernel_size, strides=2, padding='same')([img_in, mask_in])
    if bn:
        conv = BatchNormalization(name='EncBN' + str(encoder_layer.counter))(conv, training=train_bn)
    conv = Activation('relu')(conv)
    encoder_layer.counter += 1
    return conv, mask
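
encoder_layer names each BatchNormalization layer with a running function attribute, so the counter must be initialized before the first call; train_bn is assumed to come from the enclosing scope. A minimal usage sketch with hypothetical input tensors:

encoder_layer.counter = 0
e1, m1 = encoder_layer(input_img, input_mask, 64, 7, bn=False)
e2, m2 = encoder_layer(e1, m1, 128, 5)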
Example #3
def decoder_layer(img_in, mask_in, e_conv, e_mask, filters, bn=True):
    # Upsample both branches and concatenate with the encoder skip connection
    up_img = UpSampling2D(size=(2, 2))(img_in)
    up_mask = UpSampling2D(size=(2, 2))(mask_in)
    concat_img = Concatenate(axis=3)([e_conv, up_img])
    concat_mask = Concatenate(axis=3)([e_mask, up_mask])
    # Three parallel branches (1x1->5x5, 1x1->3x3, plain 1x1), Inception-style
    conva, maska = PConv2D(filters=64, kernel_size=1, padding='same')([concat_img, concat_mask])
    conva, maska = PConv2D(filters, kernel_size=5, padding='same')([conva, maska])
    convb, maskb = PConv2D(filters=64, kernel_size=1, padding='same')([concat_img, concat_mask])
    convb, maskb = PConv2D(filters, kernel_size=3, padding='same')([convb, maskb])
    convc, maskc = PConv2D(filters, kernel_size=1, padding='same')([concat_img, concat_mask])
    conv = Concatenate(axis=3)([conva, convb, convc])
    mask = Concatenate(axis=3)([maska, maskb, maskc])

    if bn:
        conv = BatchNormalization()(conv)
    conv = LeakyReLU(alpha=0.2)(conv)
    return conv, mask
Example #4
def mod_decoder_layer(img_in,
                      mask_in,
                      e_conv,
                      e_mask,
                      filters,
                      kernel_size,
                      bn=True):
    # size=(1, 1) makes UpSampling2D a no-op: this variant only concatenates
    # the skip connection without changing resolution
    up_img = UpSampling2D(size=(1, 1))(img_in)
    up_mask = UpSampling2D(size=(1, 1))(mask_in)
    concat_img = Concatenate(axis=3)([e_conv, up_img])
    concat_mask = Concatenate(axis=3)([e_mask, up_mask])
    conv, mask = PConv2D(filters, kernel_size,
                         padding='same')([concat_img, concat_mask])
    if bn:
        conv = BatchNormalization()(conv)
    conv = LeakyReLU(alpha=0.2)(conv)
    return conv, mask
Example #5
def decoder_layer(img_in,
                  mask_in,
                  e_conv,
                  e_mask,
                  filters,
                  kernel_size,
                  bn=True):
    # Upsample by the ratio between the encoder skip tensor and the current
    # tensor; UpSampling2D expects integer factors, so the skip shape is
    # assumed to be an integer multiple of the decoder shape
    rows_ratio = int(e_conv.shape[1]) // int(img_in.shape[1])
    cols_ratio = int(e_conv.shape[2]) // int(img_in.shape[2])
    up_img = UpSampling2D(size=(rows_ratio, cols_ratio))(img_in)
    up_mask = UpSampling2D(size=(rows_ratio, cols_ratio))(mask_in)
    concat_img = Concatenate(axis=3)([e_conv, up_img])
    concat_mask = Concatenate(axis=3)([e_mask, up_mask])
    conv, mask = PConv2D(filters, kernel_size,
                         padding='same')([concat_img, concat_mask])
    if bn:
        conv = BatchNormalization()(conv)
    conv = LeakyReLU(alpha=0.2)(conv)
    return conv, mask
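
Because the upsampling factor is derived from the skip tensor's shape, the same decoder works across any integer resolution gap. A usage sketch with hypothetical tensor names:

d_img, d_mask = decoder_layer(bottleneck_img, bottleneck_mask,
                              e3_img, e3_mask, filters=256, kernel_size=3)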
Example #6
    def unet_model(self, imgs_shape, msks_shape, dropout=0.2, final=False):
        """
        U-Net Model
        ===========
        Based on https://arxiv.org/abs/1505.04597
        The default uses UpSampling2D (nearest-neighbor interpolation in
        Keras unless an interpolation argument is given) in the decoder
        path. The alternative is to use Transposed Convolution.
        """

        if not final:
            if self.use_upsampling:
                print("Using UpSampling2D")
            else:
                print("Using Transposed Deconvolution")

        num_chan_in = imgs_shape[self.concat_axis]
        num_chan_out = msks_shape[self.concat_axis]

        # You can make the network work on variable input height and width
        # if you pass None as the height and width
        #         if self.channels_first:
        #             self.input_shape = [num_chan_in, None, None]
        #         else:
        #             self.input_shape = [None, None, num_chan_in]

        self.input_shape = imgs_shape

        self.num_input_channels = num_chan_in

        inputs = K.layers.Input(self.input_shape, name="MRImages")

        # Convolution parameters
        params = dict(kernel_size=(3, 3),
                      activation="relu",
                      padding="same",
                      kernel_initializer="he_uniform")

        # Transposed convolution parameters
        params_trans = dict(kernel_size=(2, 2), strides=(2, 2), padding="same")

        encodeA = PConv2D(name="encodeAa", filters=self.fms, **params)(inputs)
        encodeA = PConv2D(name="encodeAb", filters=self.fms, **params)(encodeA)
        poolA = K.layers.MaxPooling2D(name="poolA", pool_size=(2, 2))(encodeA)

        encodeB = PConv2D(name="encodeBa", filters=self.fms * 2,
                          **params)(poolA)
        encodeB = PConv2D(name="encodeBb", filters=self.fms * 2,
                          **params)(encodeB)
        poolB = K.layers.MaxPooling2D(name="poolB", pool_size=(2, 2))(encodeB)

        encodeC = PConv2D(name="encodeCa", filters=self.fms * 4,
                          **params)(poolB)
        if self.use_dropout:
            encodeC = K.layers.SpatialDropout2D(dropout)(encodeC)
        encodeC = PConv2D(name="encodeCb", filters=self.fms * 4,
                          **params)(encodeC)

        poolC = K.layers.MaxPooling2D(name="poolC", pool_size=(2, 2))(encodeC)

        encodeD = PConv2D(name="encodeDa", filters=self.fms * 8,
                          **params)(poolC)
        if self.use_dropout:
            encodeD = K.layers.SpatialDropout2D(dropout)(encodeD)
        encodeD = PConv2D(name="encodeDb", filters=self.fms * 8,
                          **params)(encodeD)

        poolD = K.layers.MaxPooling2D(name="poolD", pool_size=(2, 2))(encodeD)

        encodeE = PConv2D(name="encodeEa", filters=self.fms * 16,
                          **params)(poolD)
        encodeE = PConv2D(name="encodeEb", filters=self.fms * 16,
                          **params)(encodeE)

        if self.use_upsampling:
            up = K.layers.UpSampling2D(name="upE", size=(2, 2))(encodeE)
        else:
            up = K.layers.Conv2DTranspose(name="transconvE",
                                          filters=self.fms * 8,
                                          **params_trans)(encodeE)
        concatD = K.layers.concatenate([up, encodeD],
                                       axis=self.concat_axis,
                                       name="concatD")

        decodeC = PConv2D(name="decodeCa", filters=self.fms * 8,
                          **params)(concatD)
        decodeC = PConv2D(name="decodeCb", filters=self.fms * 8,
                          **params)(decodeC)

        if self.use_upsampling:
            up = K.layers.UpSampling2D(name="upC", size=(2, 2))(decodeC)
        else:
            up = K.layers.Conv2DTranspose(name="transconvC",
                                          filters=self.fms * 4,
                                          **params_trans)(decodeC)
        concatC = K.layers.concatenate([up, encodeC],
                                       axis=self.concat_axis,
                                       name="concatC")

        decodeB = PConv2D(name="decodeBa", filters=self.fms * 4,
                          **params)(concatC)
        decodeB = PConv2D(name="decodeBb", filters=self.fms * 4,
                          **params)(decodeB)

        if self.use_upsampling:
            up = K.layers.UpSampling2D(name="upB", size=(2, 2))(decodeB)
        else:
            up = K.layers.Conv2DTranspose(name="transconvB",
                                          filters=self.fms * 2,
                                          **params_trans)(decodeB)
        concatB = K.layers.concatenate([up, encodeB],
                                       axis=self.concat_axis,
                                       name="concatB")

        decodeA = PConv2D(name="decodeAa", filters=self.fms * 2,
                          **params)(concatB)
        decodeA = PConv2D(name="decodeAb", filters=self.fms * 2,
                          **params)(decodeA)

        if self.use_upsampling:
            up = K.layers.UpSampling2D(name="upA", size=(2, 2))(decodeA)
        else:
            up = K.layers.Conv2DTranspose(name="transconvA",
                                          filters=self.fms,
                                          **params_trans)(decodeA)
        concatA = K.layers.concatenate([up, encodeA],
                                       axis=self.concat_axis,
                                       name="concatA")

        convOut = PConv2D(name="convOuta", filters=self.fms, **params)(concatA)
        convOut = PConv2D(name="convOutb", filters=self.fms, **params)(convOut)

        prediction = K.layers.Conv2D(name="PredictionMask",
                                     filters=num_chan_out,
                                     kernel_size=(1, 1),
                                     activation="sigmoid")(convOut)

        model = K.models.Model(inputs=[inputs],
                               outputs=[prediction],
                               name="2DUNet_pconv_decathlon_brats")

        optimizer = self.optimizer

        if final:
            model.trainable = False
        else:
            model.compile(optimizer=optimizer,
                          loss=self.loss,
                          metrics=self.metrics)

            if self.print_model:
                model.summary()

        return model
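
A hypothetical call, from inside the enclosing class, which is assumed to provide fms, concat_axis, use_upsampling, use_dropout, optimizer, loss, metrics and print_model (all referenced in the method body):

model = self.unet_model(imgs_shape=(128, 128, 4),
                        msks_shape=(128, 128, 1))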
Example #7
# img, mask and masked_img are assumed to be prepared earlier in the script;
# PConv2D is the project's partial-convolution layer
import numpy as np
import matplotlib.pyplot as plt
from keras.layers import Input

masked_img[mask == 0] = np.nan

# Show side by side
_, axes = plt.subplots(1, 3, figsize=(20, 5))
axes[0].imshow(img)
axes[1].imshow(mask)
axes[2].imshow(masked_img)
plt.show()

img = np.reshape(img, (300, 300, 1))
#img = np.repeat(img,3,axis=2)

# Input images and masks (shape assumed to match the reshaped image above)
shape = (300, 300, 1)
input_img = Input(shape=shape)
input_mask = Input(shape=shape)
output_img, output_mask1 = PConv2D(8, kernel_size=(7, 7),
                                   strides=(2, 2))([input_img, input_mask])
output_img, output_mask2 = PConv2D(16, kernel_size=(5, 5),
                                   strides=(2, 2))([output_img, output_mask1])
output_img, output_mask3 = PConv2D(32, kernel_size=(5, 5),
                                   strides=(2, 2))([output_img, output_mask2])
output_img, output_mask4 = PConv2D(64, kernel_size=(3, 3),
                                   strides=(2, 2))([output_img, output_mask3])
output_img, output_mask5 = PConv2D(64, kernel_size=(3, 3),
                                   strides=(2, 2))([output_img, output_mask4])
output_img, output_mask6 = PConv2D(64, kernel_size=(3, 3),
                                   strides=(2, 2))([output_img, output_mask5])
output_img, output_mask7 = PConv2D(64, kernel_size=(3, 3),
                                   strides=(2, 2))([output_img, output_mask6])
#output_img, output_mask8 = PConv2D(64, kernel_size=(3,3), strides=(2,2))([output_img, output_mask7])

# Create model
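
The snippet is cut off here; a hypothetical completion that wraps the stacked partial convolutions into a Keras Model (tensor names taken from the code above):

from keras.models import Model

# Both the image and the mask placeholders feed the graph, so both are inputs
model = Model(inputs=[input_img, input_mask],
              outputs=[output_img, output_mask7])
model.summary()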