Exemple #1
0
def up_and_concate_3d(down_layer, layer):
    """Upsample `down_layer` 2x with a transposed conv and concatenate the
    result with the skip connection `layer` on the channel axis
    (channels-last layout, axis 4)."""
    channels_in = down_layer.get_shape().as_list()[4]
    channels_out = channels_in // 2
    upsampled = Conv3DTranspose(channels_out, [2, 2, 2],
                                strides=[2, 2, 2],
                                padding='valid')(down_layer)
    print("--------------")
    print(str(upsampled.get_shape()))

    print(str(layer.get_shape()))
    print("--------------")
    # Concatenation must be wrapped in a Lambda so it acts as a Keras layer.
    concat_layer = Lambda(
        lambda tensors: K.concatenate([tensors[0], tensors[1]], axis=4))
    return concat_layer([upsampled, layer])
Exemple #2
0
    def upLayer(self, inputLayer, concatLayer, filterSize, i, bn=False, do=False):
        """Decoder step: transposed-conv upsample, skip-connection concat,
        then two 3x3x3 ReLU convs with optional batch norm and dropout."""
        suffix = str(i)
        up = Conv3DTranspose(filterSize, (2, 2, 2), strides=(1, 2, 2),
                             activation='relu', padding='same',
                             name='up' + suffix)(inputLayer)
        merged = concatenate([up, concatLayer])
        half = int(filterSize / 2)
        conv = Conv3D(half, (3, 3, 3), activation='relu', padding='same',
                      name='conv' + suffix + '_1')(merged)
        if bn:
            conv = BatchNormalization()(conv)
        if do:
            conv = Dropout(0.5, seed=3, name='Dropout_' + suffix)(conv)
        conv = Conv3D(half, (3, 3, 3), activation='relu', padding='same',
                      name='conv' + suffix + '_2')(conv)
        if bn:
            conv = BatchNormalization()(conv)
        return conv
Exemple #3
0
def transpose_conv3d_bn(x,
                        nb_filter,
                        kernel_size,
                        strides=(1, 1, 1),
                        padding='same'):
    """Apply Conv3DTranspose -> BatchNormalization -> ReLU.

    Kernel and bias are both L2-regularized with the module-level REG_COFF.
    """
    deconv = Conv3DTranspose(nb_filter,
                             kernel_size=kernel_size,
                             strides=strides,
                             padding=padding,
                             kernel_regularizer=regularizers.l2(REG_COFF),
                             bias_regularizer=regularizers.l2(REG_COFF))
    out = deconv(x)
    out = BatchNormalization()(out)
    return Activation('relu')(out)
def convT_block(inp,
                filters,
                kernel_size,
                padding,
                channel_axis,
                strides=(1, 1, 1),
                dropout=0.0):
    """Transposed-conv block: bias-free Conv3DTranspose -> BN -> ReLU,
    followed by Dropout when `dropout` is positive."""
    out = Conv3DTranspose(filters=filters,
                          kernel_size=kernel_size,
                          strides=strides,
                          padding=padding,
                          kernel_initializer='he_normal',
                          use_bias=False)(inp)
    out = BatchNormalization(axis=channel_axis)(out)
    out = Activation('relu')(out)
    if dropout > 0.0:
        out = Dropout(dropout)(out)
    return out
Exemple #5
0
def _atten_gate(inputs, skip, filters):
    """Additive attention gate for a 3-D U-Net decoder (channels-last).

    A sigmoid attention map is computed from the gating signal `inputs` and
    the skip connection `skip`, upsampled back to the skip's spatial size,
    and used to reweight the skip features before a final conv + norm.
    """
    def __expend_as(tensor, rep):
        # Broadcast the single-channel attention map across `rep` channels.
        my_repeat = Lambda(
            lambda x, repnum: K.repeat_elements(x, repnum, axis=4),
            arguments={'repnum': rep})(tensor)
        return my_repeat

    gating = Conv3D(K.int_shape(inputs)[-1], (1, 1, 1),
                    use_bias=False,
                    padding='same')(inputs)
    gating = _norm(gating)
    shape_skip = K.int_shape(skip)
    shape_gating = K.int_shape(gating)

    # Project the skip connection down to the gating resolution.
    theta = Conv3D(filters, (2, 2, 2),
                   strides=(2, 2, 2),
                   use_bias=False,
                   padding='same')(skip)
    shape_theta = K.int_shape(theta)

    phi = Conv3D(filters, (1, 1, 1), use_bias=False, padding='same')(gating)
    phi = Conv3DTranspose(filters, (3, 3, 3),
                          strides=(shape_theta[1] // shape_gating[1],
                                   shape_theta[2] // shape_gating[2],
                                   shape_theta[3] // shape_gating[3]),
                          padding='same')(phi)

    add_xg = Add()([phi, theta])
    act_xg = Activation(activation='relu')(add_xg)
    psi = Conv3D(1, (1, 1, 1), use_bias=False, padding='same')(act_xg)
    sigmoid_xg = Activation(activation='sigmoid')(psi)
    shape_sigmoid = K.int_shape(sigmoid_xg)

    upsample_psi = UpSampling3D(size=(shape_skip[1] // shape_sigmoid[1],
                                      shape_skip[2] // shape_sigmoid[2],
                                      shape_skip[3] //
                                      shape_sigmoid[3]))(sigmoid_xg)
    upsample_psi = __expend_as(upsample_psi, shape_skip[4])

    # BUG FIX: `attention` was never defined (NameError at graph build time);
    # the attention map is `upsample_psi`, which was computed but unused.
    result = Multiply()([skip, upsample_psi])
    # NOTE(review): shape_skip[3] is a spatial dim; the channel count is
    # shape_skip[4]. This looks like an off-by-one index -- confirm intent
    # before changing the filter count.
    result = Conv3D(shape_skip[3], (3, 3, 3), padding='same')(result)
    result = _norm(result)
    return result
Exemple #6
0
    def down_block_3d(H,
                      number_of_filters=64,
                      kernel_size=(12, 12, 12),
                      strides=(8, 8, 8),
                      include_dense_convolution_layer=False):
        """DBPN-style down-projection block.

        Scales the input down, reconstructs it back up, projects the
        reconstruction residual down again, and sums both low-resolution
        feature maps.
        """
        def prelu(tensor):
            # Fresh PReLU per call, alpha shared over the spatial axes.
            return PReLU(alpha_initializer='zero', shared_axes=[1, 2, 3])(tensor)

        if include_dense_convolution_layer == True:
            H = prelu(Conv3D(filters=number_of_filters,
                             use_bias=True,
                             kernel_size=(1, 1, 1),
                             strides=(1, 1, 1),
                             padding='same')(H))

        # Scale down
        low0 = prelu(Conv3D(filters=number_of_filters,
                            kernel_size=kernel_size,
                            strides=strides,
                            kernel_initializer='glorot_uniform',
                            padding='same')(H))

        # Scale back up and take the reconstruction residual
        high0 = prelu(Conv3DTranspose(filters=number_of_filters,
                                      kernel_size=kernel_size,
                                      strides=strides,
                                      kernel_initializer='glorot_uniform',
                                      padding='same')(low0))
        residual = Subtract()([high0, H])

        # Scale the residual down and fuse with the first projection
        low1 = prelu(Conv3D(filters=number_of_filters,
                            kernel_size=kernel_size,
                            strides=strides,
                            kernel_initializer='glorot_uniform',
                            padding='same')(residual))
        return Add()([low0, low1])
Exemple #7
0
    def model_simple(self):
        """Build and compile a minimal 3-D U-Net-style model for
        (197, 233, 189, 1) volumes: one downsampling level, a middle block,
        and one upsampling level, compiled with rmsprop + MSE.

        Returns:
            The compiled Keras ``Model``.
        """
        print('MODEL')
        inputs = Input((197, 233, 189, 1))
        x = BatchNormalization()(inputs)
        # downsampling
        down1conv1 = Conv3D(2, (3, 3, 3), activation='relu', padding='same')(x)
        down1conv1 = Conv3D(2, (3, 3, 3), activation='relu',
                            padding='same')(down1conv1)
        down1pool = MaxPooling3D((2, 2, 2))(down1conv1)
        #middle
        mid_conv1 = Conv3D(2, (3, 3, 3), activation='relu',
                           padding='same')(down1pool)
        mid_conv1 = Conv3D(2, (3, 3, 3), activation='relu',
                           padding='same')(mid_conv1)

        # upsampling
        # NOTE(review): this transposed conv uses the default 'valid' padding
        # with stride 2, so its output size may not match down1conv1 for the
        # concatenation below -- confirm the spatial shapes line up.
        up1deconv = Conv3DTranspose(2, (3, 3, 3),
                                    strides=(2, 2, 2),
                                    activation='relu')(mid_conv1)
        up1concat = Concatenate()([up1deconv, down1conv1])
        up1conv1 = Conv3D(2, (3, 3, 3), activation='relu',
                          padding='same')(up1concat)
        up1conv1 = Conv3D(2, (3, 3, 3), activation='relu',
                          padding='same')(up1conv1)
        # NOTE(review): softmax over a single channel always outputs 1.0;
        # 'sigmoid' is likely intended here -- confirm before changing.
        output = Conv3D(1, (3, 3, 3), activation='softmax',
                        padding='same')(up1conv1)

        model = Model(inputs=inputs, outputs=output)
        model.compile(optimizer='rmsprop',
                      loss='mean_squared_error',
                      metrics=['accuracy'])
        '''
        train_suffix='_LesionSmooth_*.nii.gz'
        train_id = get_train_data(path='.')
        brain_image = _read_brain_image('.', train_id[1])
        mask = _read_stroke_segmentation('.', train_id[1]) 

        model.fit(brain_image[None, ..., None], mask[None, ..., None].astype(bool))
        #model.fit_on_batch
        '''
        return model
Exemple #8
0
    def contextual_convolution(layer_input, skip_input, filters, num, axis=-1, se_block=True, se_ratio=16):
        """Upsample `layer_input`, add it to `skip_input`, and optionally apply
        a squeeze-and-excitation gate; falls back to returning the raw skip
        connection when its batch dimension is unknown (None).

        NOTE(review): `num` and `axis` are accepted but never used here --
        confirm whether they were meant to parameterize anything.
        """
        llayer_shape=skip_input.get_shape().as_list()
        if llayer_shape[0]!=None:
            
            r = Conv3DTranspose(filters, (2, 2, 2), strides=(2, 2, 2), use_bias=False, padding='same')(layer_input)

            llayer_shape=skip_input.get_shape().as_list()
            # NOTE(review): UpSampling3D with size equal to the skip's full
            # spatial shape multiplies each spatial dim by that size -- this
            # looks like it was meant to be a resize to the skip's shape;
            # confirm intent.
            r = UpSampling3D(size=(llayer_shape[1],llayer_shape[2],llayer_shape[3]),data_format=None)(r)
            r = add([r,skip_input])
            
            if se_block == True:
                # Squeeze-and-excitation: global pool -> bottleneck -> sigmoid
                # channel weights, broadcast back over the feature map.
                se = GlobalAveragePooling3D()(r)
                se = Dense(filters // se_ratio, activation='selu')(se)
                se = Dense(filters, activation='sigmoid')(se)
                se = Reshape([1, 1, 1, filters])(se)
                r = Multiply()([r, se])
                r = Activation('selu')(r)
        else:
            r=skip_input
        return r
def AttnGatingBlock(x, g, inter_shape, name):
    """Additive attention gate: the gating signal `g` produces a sigmoid
    attention map that reweights the skip connection `x` channel-wise.
    All named layers keep the `name` suffix for weight-loading parity."""
    shape_x = K.int_shape(x)  # 32
    shape_g = K.int_shape(g)  # 16

    # Downsample the skip connection to the gating resolution.
    theta_x = Conv3D(inter_shape, (2, 2, 2),
                     strides=(2, 2, 2),
                     padding='same',
                     name='xl' + name)(x)  # 16
    shape_theta_x = K.int_shape(theta_x)

    phi_g = Conv3D(inter_shape, (1, 1, 1), padding='same')(g)
    up_strides = (shape_theta_x[1] // shape_g[1],
                  shape_theta_x[2] // shape_g[2],
                  shape_theta_x[3] // shape_g[3])
    upsample_g = Conv3DTranspose(inter_shape, (3, 3, 3),
                                 strides=up_strides,
                                 padding='same',
                                 name='g_up' + name)(phi_g)  # 16

    act_xg = Activation('relu')(add([upsample_g, theta_x]))
    psi = Conv3D(1, (1, 1, 1), padding='same', name='psi' + name)(act_xg)
    sigmoid_xg = Activation('sigmoid')(psi)
    shape_sigmoid = K.int_shape(sigmoid_xg)

    # Bring the attention map back to x's spatial size, then broadcast it
    # over x's channels.
    scale = (shape_x[1] // shape_sigmoid[1],
             shape_x[2] // shape_sigmoid[2],
             shape_x[3] // shape_sigmoid[3])
    upsample_psi = UpSampling3D(size=scale)(sigmoid_xg)  # 32
    upsample_psi = expend_as(upsample_psi, shape_x[4], name)

    y = multiply([upsample_psi, x], name='q_attn' + name)

    result = Conv3D(shape_x[4], (1, 1, 1),
                    padding='same',
                    name='q_attn_conv' + name)(y)
    return BatchNormalization(name='q_attn_bn' + name)(result)
Exemple #10
0
 def uk(self, x, k):
     """Upsampling block `uk`: double the spatial resolution and map to `k`
     filters, using either resize-convolution (nearest-neighbor upsample +
     reflection pad + valid conv) or a strided transposed conv, then
     normalize and apply ReLU.
     """
     # (up sampling followed by 1x1 convolution <=> fractional-strided 1/2)
     if self.use_resize_convolution:
         x = UpSampling3D(size=(2, 2, 2))(x)  # Nearest neighbor upsampling
         x = ReflectionPadding3D((1, 1, 1))(x)
         x = Conv3D(filters=k,
                    kernel_size=3,
                    strides=1,
                    padding='valid',
                    use_bias=self.use_bias)(x)
     else:
         x = Conv3DTranspose(
             filters=k,
             kernel_size=3,
             strides=2,
             padding='same',
             use_bias=self.use_bias)(
                 x)  # this matches fractionally strided conv with stride 1/2
     # training=True forces the normalization to use current-batch statistics
     # at inference as well (instance-norm-style behaviour).
     x = self.normalization(axis=4, center=True,
                            epsilon=1e-5)(x, training=True)
     x = Activation('relu')(x)
     return x
def upward_layer(input0, input1, n_convolutions, n_output_channels):
    """V-Net style decoder stage: concat the two inputs, run a residual conv
    stack, then upsample with a transposed conv.

    Returns a 4-tuple: (PReLU(upsampled), merged, last hidden, residual sum).
    """
    merged = concatenate([input0, input1], axis=4)
    inl = merged
    for _ in range(n_convolutions - 1):
        conv = Conv3D(n_output_channels * 4, (5, 5, 5),
                      padding='same',
                      data_format='channels_last')(inl)
        inl = PReLU(shared_axes=[1, 2, 3])(conv)
        inl = BatchNormalization(axis=CHANNEL_AXIS)(inl)
        inl = Dropout(0.5)(inl)
    # Final conv has no BN/dropout; its output is summed with the merge
    # (residual connection) before upsampling.
    conv = Conv3D(n_output_channels * 4, (5, 5, 5),
                  padding='same',
                  data_format='channels_last')(inl)
    added = add([conv, merged])
    upsample = Conv3DTranspose(n_output_channels, (2, 2, 2),
                               strides=(2, 2, 2),
                               padding='SAME',
                               use_bias=True,
                               kernel_initializer='glorot_uniform',
                               bias_initializer='zeros',
                               data_format='channels_last')(added)
    return PReLU(shared_axes=[1, 2, 3])(upsample), merged, inl, added
Exemple #12
0
 def _upconv3d(self,
               inputs,
               skip_input,
               filters,
               se_block=True,
               se_ratio=16,
               loop=2):
     """Decoder step: zero-pad, 2x transposed-conv upsample, normalize and
     activate, crop-concat with the skip connection, then run the shared
     conv block.

     Args:
         inputs: decoder feature map to upsample.
         skip_input: encoder feature map joined after crop-concat.
         filters: filter count for the transposed conv and conv block.
         se_block: forwarded to the conv block (squeeze-and-excitation on/off).
         se_ratio: SE bottleneck reduction ratio, forwarded to the conv block.
         loop: number of conv repetitions inside the conv block.
     """
     # Pad one voxel on the far side of each axis before the stride-2
     # upsample -- presumably to handle odd spatial dims, with the
     # crop-concat trimming any excess; confirm against _crop_concat.
     x = ZeroPadding3D(((0, 1), (0, 1), (0, 1)))(inputs)
     x = Conv3DTranspose(filters, (2, 2, 2),
                         strides=(2, 2, 2),
                         use_bias=False,
                         padding='same')(x)
     x = self._norm(x)
     x = self._activation(x)
     x = self._crop_concat()([x, skip_input])
     x = self._conv3d(x,
                      filters,
                      se_block=se_block,
                      se_ratio=se_ratio,
                      downsizing=False,
                      loop=loop)
     return x
 def deconv3d(layer_input, skip_input, filters, axis=-1, se_res_block=True, se_ratio=16):
     """Decoder block: transposed-conv upsample, crop-concat with the skip
     connection, two 3x3x3 convs, and an optional squeeze-and-excitation
     residual with a projected shortcut."""
     up = ZeroPadding3D(((0, 1), (0, 1), (0, 1)))(layer_input)
     up = Conv3DTranspose(filters, (2, 2, 2), strides=(2, 2, 2),
                          use_bias=False, padding='same')(up)
     up = InstanceNormalization(axis=axis)(up)
     up = LeakyReLU(alpha=0.3)(up)
     up = CropToConcat3D()([up, skip_input])

     out = Conv3D(filters, (3, 3, 3), use_bias=False, padding='same')(up)
     out = InstanceNormalization(axis=axis)(out)
     out = LeakyReLU(alpha=0.3)(out)
     out = Conv3D(filters, (3, 3, 3), use_bias=False, padding='same')(out)
     out = InstanceNormalization(axis=axis)(out)
     if se_res_block == True:
         # Squeeze-and-excitation channel reweighting + projected shortcut.
         se = GlobalAveragePooling3D()(out)
         se = Dense(filters // se_ratio, activation='relu')(se)
         se = Dense(filters, activation='sigmoid')(se)
         se = Reshape([1, 1, 1, filters])(se)
         out = Multiply()([out, se])
         shortcut = Conv3D(filters, (3, 3, 3), use_bias=False, padding='same')(up)
         shortcut = InstanceNormalization(axis=axis)(shortcut)
         out = layers.add([out, shortcut])
     return LeakyReLU(alpha=0.3)(out)
Exemple #14
0
def level_block(m, dim, depth, inc, acti, do, bn, pool_type, up, res):
    """Recursively build one U-Net level: conv block, downsample, recurse,
    upsample, pad back to the skip's shape, concatenate, and conv again.

    Args:
        m: input tensor. dim: base filter count. depth: remaining levels.
        inc: filter growth factor per level. acti: activation name.
        do: dropout rate (bottom level only). bn: batch-norm flag.
        pool_type: 0=max pool, 1=average pool, otherwise strided conv.
        up: True for UpSampling3D + padding, False for Conv3DTranspose.
        res: residual flag forwarded to conv_block.
    """
    if depth > 0:
        n = conv_block(m, dim, acti, bn, res)
        if pool_type == 0:
            m = MaxPooling3D(pool_size=(2, 2, 2))(n)
        elif pool_type == 1:
            m = AveragePooling3D(pool_size=(2, 2, 2))(n)
        else:
            # BUG FIX: the strided-conv result was discarded (the call was
            # never assigned), so m was not downsampled on this path.
            m = Conv3D(dim, 3, strides=2, padding='same')(n)

        m = level_block(m, int(inc * dim), depth - 1, inc, acti, do, bn,
                        pool_type, up, res)

        if up:
            m = UpSampling3D(size=(2, 2, 2))(m)
            # Re-pad to the skip connection's shape when odd sizes were
            # halved on the way down.
            diff_phi = n.shape[1] - m.shape[1]
            diff_r = n.shape[2] - m.shape[2]
            diff_z = n.shape[3] - m.shape[3]
            padding = ((int(diff_phi), 0), (int(diff_r), 0), (int(diff_z), 0))
            if diff_phi != 0:
                m = symmetryPadding3d(padding=padding, mode="SYMMETRIC")(m)
            elif (diff_r != 0 or diff_z != 0):
                m = symmetryPadding3d(padding=padding, mode="CONSTANT")(m)
        else:
            m = Conv3DTranspose(dim,
                                3,
                                strides=2,
                                activation=acti,
                                padding='same')(m)
        n = concatenate([n, m])
        m = conv_block(n, dim, acti, bn, res)
    else:
        m = conv_block(m, dim, acti, bn, res, do)
    return m
Exemple #15
0
def expanding_layer(input, neurons, concatenate_link, regularizer):
    """Expanding (decoder) layer: 2x transposed-conv upsample joined with the
    skip link, batch norm, two regularized 3x3x3 ReLU convs, and a final
    dense-style concat of the normalized merge with the conv output."""
    upsampled = Conv3DTranspose(neurons, (2, 2, 2), strides=(2, 2, 2),
                                padding='same')(input)
    up = concatenate([upsampled, concatenate_link], axis=4)
    up = BatchNormalization(axis=-1)(up)

    conv_a = Conv3D(neurons, (3, 3, 3),
                    activation='relu',
                    kernel_regularizer=regularizer,
                    padding='same')(up)
    conv_a = BatchNormalization(axis=-1)(conv_a)

    conv_b = Conv3D(neurons, (3, 3, 3),
                    activation='relu',
                    kernel_regularizer=regularizer,
                    padding='same')(conv_a)

    return concatenate([up, conv_b], axis=4)
Exemple #16
0
def iliopsoas_seg_net(input_size):
    """Build a V-Net-style 3-D segmentation network for the iliopsoas muscle.

    Args:
        input_size: input volume shape tuple (channels-last).

    Returns:
        Keras ``Model`` mapping the input volume to a single-channel sigmoid
        probability map.
    """
    # Down 1
    inputs = Input(input_size)
    conv_1 = Conv3D(8, kernel_size=5, strides=1, activation=selu, padding='same', kernel_initializer='he_normal')(inputs)
    # Tile the single input channel 8x so it can be added residually to the
    # 8-channel conv output.
    repeat_1 = concatenate(8 * [inputs], axis=-1)
    add_1 = add([conv_1, repeat_1])
    down_1 = Conv3D(16, 2, strides=2, activation=selu, padding='same', kernel_initializer='he_normal')(add_1)

    # Down 2,3,4
    down_2, add_2 = downward_layer(down_1, 2, 32)
    down_3, add_3 = downward_layer(down_2, 3, 64)
    down_4, add_4 = downward_layer(down_3, 3, 128)

    # Bottom: three convs with a residual connection back to down_4
    conv_5_1 = Conv3D(128, kernel_size=(5, 5, 5), activation=selu, padding='same', kernel_initializer='he_normal')(down_4)
    conv_5_2 = Conv3D(128, kernel_size=(5, 5, 5), activation=selu, padding='same', kernel_initializer='he_normal')(conv_5_1)
    conv_5_3 = Conv3D(128, kernel_size=(5, 5, 5), activation=selu, padding='same', kernel_initializer='he_normal')(conv_5_2)
    add5 = add([conv_5_3, down_4])
    # NOTE(review): aux_shape is computed but unused -- possibly leftover.
    aux_shape = add5.get_shape()
    upsample_5 = Conv3DTranspose(64, (2, 2, 2), padding='same', activation=selu, strides=(2, 2, 2), kernel_initializer='he_normal')(add5)

    # Up 6,7,8
    upsample_6 = upward_layer(upsample_5, add_4, 3, 32)
    upsample_7 = upward_layer(upsample_6, add_3, 3, 16)
    upsample_8 = upward_layer(upsample_7, add_2, 2, 8)

    # Up 9
    merged_9 = concatenate([upsample_8, add_1], axis=4)
    conv_9_1 = Conv3D(16, kernel_size=(5, 5, 5), activation=selu, padding='same', kernel_initializer='he_normal')(merged_9)

    add_9 = add([conv_9_1, merged_9])
    conv_9_2 = Conv3D(1, kernel_size=(1, 1, 1), activation=selu, padding='same', kernel_initializer='he_normal')(add_9)

    # Final 1x1x1 conv produces the sigmoid probability map.
    sigmoid = Conv3D(1, kernel_size=(1, 1, 1), padding='same', kernel_initializer='he_normal', activation='sigmoid')(conv_9_2)

    model = Model(inputs=inputs, outputs=sigmoid)
    return model
def __vnet_level__(in_layer, filters, config, remove_last_conv=False):
    """Recursively build one V-Net level: conv, strided downsample, recurse,
    transposed-conv upsample, concat with the skip, and a final conv.

    Args:
        in_layer: input tensor for this level.
        filters: list of filter counts, one per remaining level; the
            recursion bottoms out when one entry is left.
        config: dict; the "batchnorm" key toggles BatchNormalization.
        remove_last_conv: when True, return the deeper output directly and
            skip this level's upsample/concat path.
    """
    if len(filters) == 1:
        return LeakyReLU()(Conv3D(filters=filters[0],
                                  kernel_size=3,
                                  padding='same')(in_layer))
    else:
        tlayer = LeakyReLU()(Conv3D(filters=filters[0],
                                    kernel_size=3,
                                    padding='same')(in_layer))
        tlayer = BatchNormalization()(tlayer) if bool(
            config.get("batchnorm", False)) else tlayer

        down = LeakyReLU()(Conv3D(filters=filters[0],
                                  kernel_size=3,
                                  strides=2,
                                  padding='same')(tlayer))
        # BUG FIX: batch norm was applied to `tlayer` instead of `down`,
        # so the strided conv's output was replaced by a re-normalized copy
        # of the skip tensor whenever batchnorm was enabled.
        down = BatchNormalization()(down) if bool(
            config.get("batchnorm", False)) else down

        out_deeper = __vnet_level__(down, filters[1:], config)
        if remove_last_conv:
            return out_deeper
        up = LeakyReLU()(Conv3DTranspose(filters=filters[0],
                                         kernel_size=3,
                                         strides=2,
                                         padding='same')(out_deeper))

        tlayer = Concatenate()([up, tlayer])
        tlayer = BatchNormalization()(tlayer) if bool(
            config.get("batchnorm", False)) else tlayer

        tlayer = LeakyReLU()(Conv3D(filters=filters[0],
                                    kernel_size=3,
                                    padding='same')(tlayer))
        tlayer = BatchNormalization()(tlayer) if bool(
            config.get("batchnorm", False)) else tlayer

        return tlayer
Exemple #18
0
def transpose_shortcut(input, residual, name=None):
    """Shortcut branch that upsamples `input` to `residual`'s shape with a
    1x1x1 transposed conv before the residual add (channels-last).

    When all spatial upsampling factors are 1 and the channel counts match,
    the input is used as the identity shortcut unchanged.
    """
    input_shape = K.int_shape(input)
    residual_shape = K.int_shape(residual)
    # Upsampling factor per spatial axis (dims 1..3 of a channels-last 5-D
    # tensor), rounded to the nearest integer.
    stride_dim1 = int(round(residual_shape[1] / input_shape[1]))
    stride_dim2 = int(round(residual_shape[2] / input_shape[2]))
    stride_dim3 = int(round(residual_shape[3] / input_shape[3]))
    equal_channels = input_shape[4] == residual_shape[4]

    identity = input

    if stride_dim1 > 1 or stride_dim2 > 1 or stride_dim3 > 1 or not equal_channels:
        # BUG FIX: the factors were computed per axis (1, 2, 3) but passed
        # as (axis-2, axis-1, axis-3), swapping the first two strides for
        # non-cubic feature maps. Pass them in axis order.
        identity = Conv3DTranspose(
            filters=residual_shape[4],
            kernel_size=(1, 1, 1),
            strides=(stride_dim1, stride_dim2, stride_dim3),
            padding="valid",
            kernel_regularizer=regularizers.l2(REG_COFF),
            bias_regularizer=regularizers.l2(REG_COFF))(input)

    return add([identity, residual], name=name)
Exemple #19
0
    def deconv3d(layer_input, skip_input, filters, axis=-1, se_res_block=True, se_ratio=16, atten_gate=False):
        """Decoder block: optional attention gating of the skip connection,
        transposed-conv upsampling, crop-concat, two 3x3x3 convs, and an
        optional squeeze-and-excitation residual with projected shortcut.
        """
        if atten_gate == True:
            # Additive attention: gate from the decoder input, key from the
            # downsampled skip; the sigmoid map reweights the skip features.
            gating = Conv3D(filters, (1, 1, 1), use_bias=False, padding='same')(layer_input)
            gating = InstanceNormalization(axis=axis)(gating)
            attention = Conv3D(filters, (2, 2, 2), strides=(2, 2, 2), use_bias=False, padding='valid')(skip_input)
            attention = InstanceNormalization(axis=axis)(attention)
            attention = add([gating, attention])
            attention = Conv3D(1, (1, 1, 1), use_bias=False, padding='same', activation='sigmoid')(attention)
            # attention = Lambda(resize_by_axis, arguments={'dim_1':skip_input.get_shape().as_list()[1],'dim_2':skip_input.get_shape().as_list()[2],'ax':3})(attention) # error when None dimension is feeded.
            # attention = Lambda(resize_by_axis, arguments={'dim_1':skip_input.get_shape().as_list()[1],'dim_2':skip_input.get_shape().as_list()[3],'ax':2})(attention)
            # Pad + upsample the map back to the skip's size, crop to match,
            # then tile the single channel across all `filters` channels.
            attention = ZeroPadding3D(((0, 1), (0, 1), (0, 1)))(attention)
            attention = UpSampling3D((2, 2, 2))(attention)
            attention = CropToConcat3D(mode='crop')([attention, skip_input])
            attention = Lambda(lambda x: K.tile(x, [1, 1, 1, 1, filters]))(attention)
            skip_input = multiply([skip_input, attention])

        # Pad one voxel on the far sides before the stride-2 upsample;
        # CropToConcat3D trims any excess against the skip tensor.
        u1 = ZeroPadding3D(((0, 1), (0, 1), (0, 1)))(layer_input)
        u1 = Conv3DTranspose(filters, (2, 2, 2), strides=(2, 2, 2), use_bias=False, padding='same')(u1)
        u1 = InstanceNormalization(axis=axis)(u1)
        u1 = LeakyReLU(alpha=0.3)(u1)
        u1 = CropToConcat3D()([u1, skip_input])
        u2 = Conv3D(filters, (3, 3, 3), use_bias=False, padding='same')(u1)
        u2 = InstanceNormalization(axis=axis)(u2)
        u2 = LeakyReLU(alpha=0.3)(u2)
        u2 = Conv3D(filters, (3, 3, 3), use_bias=False, padding='same')(u2)
        u2 = InstanceNormalization(axis=axis)(u2)
        if se_res_block == True:
            # Squeeze-and-excitation channel reweighting + projected shortcut.
            se = GlobalAveragePooling3D()(u2)
            se = Dense(filters // se_ratio, activation='relu')(se)
            se = Dense(filters, activation='sigmoid')(se)
            se = Reshape([1, 1, 1, filters])(se)
            u2 = Multiply()([u2, se])
            shortcut = Conv3D(filters, (3, 3, 3), use_bias=False, padding='same')(u1)
            shortcut = InstanceNormalization(axis=axis)(shortcut)
            u2 = add([u2, shortcut])
        u2 = LeakyReLU(alpha=0.3)(u2)
        return u2
def SDN_ver2(inputs): #should control the size carefully, larger strides to downsample
    """Spatial-deformer localization network, version 2.

    Encodes the input with two strided convs, then predicts one tanh
    displacement component per spatial axis through three identical decoder
    branches, and warps the input with the resulting 3-channel flow field.

    Returns:
        (warped_input, flow_field) -- the SpatialDeformer3D output and the
        localization net's prediction for `inputs`.
    """
    def _flow_component(features):
        # One decoder branch: two transposed convs back to input resolution,
        # each followed by a 2x2x2 conv, then pad and predict a single tanh
        # displacement channel. (Was copy-pasted three times for x/y/z.)
        z4 = Conv3DTranspose(64, par['kernel_size'],
                             strides=par['kernel_strides'],
                             padding='valid', activation='linear')(features)
        z4 = Conv3D(64, (2, 2, 2), padding='same')(z4)
        z5 = Conv3DTranspose(32, par['kernel_size'],
                             strides=par['kernel_strides'],
                             padding='valid', activation='linear')(z4)
        z5 = Conv3D(32, (2, 2, 2), padding='same')(z5)
        z5 = ZeroPadding3D((2, 1, 2))(z5)
        return Conv3D(1, par['kernel_size'], padding='valid',
                      activation='tanh')(z5)

    # Encoder: two strided valid convs with PReLU shared over spatial axes
    # (alpha varies only along the channel axis).
    z1_2 = Conv3D(32, par['kernel_size'], strides=par['kernel_strides'],
                  padding='valid', activation='linear')(inputs)
    z1_2 = PReLU(shared_axes=[4])(z1_2)

    z2_2 = Conv3D(64, par['kernel_size'], strides=par['kernel_strides'],
                  padding='valid', activation='linear')(z1_2)
    z2_2 = PReLU(shared_axes=[4])(z2_2)

    # Three independent (untied-weight) branches, one displacement component
    # per spatial axis, concatenated on the channel axis.
    flow = concatenate([_flow_component(z2_2),
                        _flow_component(z2_2),
                        _flow_component(z2_2)], axis=-1)

    locnet = Model(inputs, flow)

    # Warp the input with the predicted deformation field.
    x1 = SpatialDeformer3D(localization_net=locnet,
                           output_size=(input_shape[0], input_shape[1],
                                        input_shape[2]),
                           input_shape=input_shape)(inputs)

    return x1, locnet(inputs)
Exemple #21
0
 def deconv_block(self,
                  input_tensor,
                  input_channel,
                  output_channel,
                  o_name,
                  strides=(2, 2, 2)):
     """
     Transposed-conv upsampling block (channels-first):
     Conv3DTranspose -> BatchNormalization -> ReLU.

     # Arguments
         input_tensor: input tensor
         input_channel: input channel (unused here; kept for signature parity)
         output_channel: output channel
         o_name: layer-name prefix ('.0' for the conv, '.1' for the norm)
         strides: stride
     # Returns
         Output tensor for the block.
     """
     x = Conv3DTranspose(output_channel,
                         kernel_size=(2, 2, 2),
                         strides=strides,
                         kernel_initializer="random_normal",
                         name=o_name + '.0',
                         data_format='channels_first')(input_tensor)
     # axis=1 is the channel axis for channels_first data.
     x = BatchNormalization(axis=1, momentum=0.1, name=o_name + '.1')(x)
     x = Activation('relu')(x)
     return x
Exemple #22
0
def level_block(m, dim, depth, inc_rate, activation, dropout, batchnorm,
                pool_type, upconv, residual):
    """Recursively build one U-Net level: conv block, downsample, recurse,
    upsample, pad back to the skip's shape, concatenate, and conv again.

    Args:
        m: input tensor. dim: base filter count. depth: remaining levels.
        inc_rate: filter growth factor per level. activation: activation name.
        dropout: dropout rate (bottom level only). batchnorm: batch-norm flag.
        pool_type: 0=max pool, 1=average pool, otherwise strided conv.
        upconv: True for UpSampling3D + padding, False for Conv3DTranspose.
        residual: residual flag forwarded to conv_block.
    """
    if depth > 0:
        n = conv_block(m, dim, activation, batchnorm, residual)
        if pool_type == 0:
            m = MaxPooling3D(pool_size=(2, 2, 2))(n)
        elif pool_type == 1:
            m = AveragePooling3D(pool_size=(2, 2, 2))(n)
        else:
            # BUG FIX: the strided-conv result was discarded (the call was
            # never assigned), so m was not downsampled on this path.
            m = Conv3D(dim, 3, strides=2, padding='same')(n)

        m = level_block(m, int(inc_rate * dim), depth - 1, inc_rate,
                        activation, dropout, batchnorm, pool_type, upconv,
                        residual)

        if upconv:
            m = UpSampling3D(size=(2, 2, 2))(m)
            # Re-pad to the skip connection's shape when odd sizes were
            # halved on the way down.
            diff_phi = n.shape[1] - m.shape[1]
            diff_r = n.shape[2] - m.shape[2]
            diff_z = n.shape[3] - m.shape[3]
            padding = [[int(diff_phi), 0], [int(diff_r), 0], [int(diff_z), 0]]
            if diff_phi != 0:
                m = SymmetryPadding3d(padding=padding, mode="SYMMETRIC")(m)
            elif (diff_r != 0 or diff_z != 0):
                m = SymmetryPadding3d(padding=padding, mode="CONSTANT")(m)
        else:
            m = Conv3DTranspose(dim,
                                3,
                                strides=2,
                                activation=activation,
                                padding='same')(m)
        n = concatenate([n, m])
        m = conv_block(n, dim, activation, batchnorm, residual)
    else:
        m = conv_block(m, dim, activation, batchnorm, residual, dropout)
    return m
Exemple #23
0
def Decoding_Deep_Supervision(inputs_1,
                              inputs_2,
                              filters=64,
                              blocks=4,
                              channel=3):
    """Decoder with deep supervision: each upsampling stage emits its own
    softmax prediction; the collected outputs are reversed so the finest
    resolution comes first."""
    x = inputs_1
    stage_outputs = []
    for index in np.arange(blocks - 2, -1, -1):
        n_filters = filters * np.power(2, index)
        x = Conv3DTranspose(n_filters,
                            kernel_size=(2, 2, 1),
                            strides=(2, 2, 1),
                            padding='same')(x)
        x = Concatenate(axis=4)([x, inputs_2[index]])

        # Two conv -> BN -> PReLU refinements after the skip merge.
        for _ in range(2):
            x = Conv3D(n_filters,
                       kernel_size=(3, 3, 3),
                       strides=1,
                       padding='same',
                       kernel_initializer='he_normal')(x)
            x = BatchNormalization()(x)
            x = PReLU()(x)

        stage_outputs.append(
            Conv3D(channel, (1, 1, 1), activation='softmax')(x))

    stage_outputs.reverse()
    return stage_outputs
Exemple #24
0
def _unet_upconv_block(inputs,
                       skip_input,
                       filters,
                       skip='unet',
                       top_down='unet',
                       norm='bn',
                       activation='relu'):
    """U-Net decoder block: optionally attention-gate the skip connection,
    upsample the input 2x with a transposed conv, crop-concat, then run the
    top-down conv stack. 'dense' skip mode is not implemented."""
    if 'attention' in skip:
        skip_input = [_atten_gate(inputs, skip_input, filters)]
    elif 'dense' in skip:
        raise ValueError()
    elif 'unet' in skip:
        skip_input = [skip_input]

    up = ZeroPadding3D(((0, 1), (0, 1), (0, 1)))(inputs)
    up = Conv3DTranspose(filters, (2, 2, 2),
                         strides=(2, 2, 2),
                         use_bias=False,
                         padding='same')(up)
    up = _normalization(up, norm=norm)
    up = _activation(up, activation=activation)
    x = _crop_concat()([up] + skip_input)

    if 'unet' in top_down:
        use_se = 'se' in top_down
        # Two identical refinement blocks after the skip merge.
        for _ in range(2):
            x = _basic_block(x,
                             filters, (3, 3, 3),
                             norm=norm,
                             activation=activation,
                             is_seblock=use_se)

    return x
Exemple #25
0
def getModel(temporal_depth, img_rows, img_cols, channels, depth, kernels, max_kernel_multiplier=16, activation='sigmoid'):
    """Build a 3-D residual U-Net ('resUnet3d').

    The encoder has ``depth + 1`` levels; the per-level filter multiplier
    is ``2**level`` capped at ``max_kernel_multiplier``.  The decoder
    mirrors the encoder, concatenating the stored skip connections, and
    applies dropout at the two deepest decoder levels.
    """
    inputs = Input((temporal_depth, img_rows, img_cols, channels))  # 64
    x = inputs
    skips = []

    # --- Encoder ---------------------------------------------------------
    for level in range(depth + 1):
        mult = min(2 ** level, max_kernel_multiplier)
        n_filters = kernels * mult
        x = Conv3D(n_filters, 3, padding='same', kernel_initializer='he_normal')(x)
        x = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.0001, center=False, scale=False)(x)
        x = activate(x, 'leakyRelu')
        x = resBlock(x, n_filters, 'leakyRelu')
        skips.append(x)
        # No pooling after the bottleneck level.
        if level < depth:
            x = MaxPooling3D(2)(x)

    # --- Decoder ---------------------------------------------------------
    for level in reversed(range(depth)):
        mult = min(2 ** level, max_kernel_multiplier)
        n_filters = kernels * mult
        x = Conv3DTranspose(n_filters, 3, strides=2, padding='same', kernel_initializer='he_normal')(x)
        x = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.0001, center=False, scale=False)(x)
        x = activate(x, 'relu')
        x = Concatenate(axis=-1)([skips[level], x])
        # Dropout only on the two coarsest decoder levels.
        if level > depth - 3:
            x = Dropout(0.5)(x)
        x = Conv3D(n_filters, 3, padding='same', kernel_initializer='he_normal')(x)
        x = resBlock(x, n_filters, 'relu')

    out_layer = Conv3D(1, 1, activation=activation, padding='same', kernel_initializer='he_normal')(x)
    return Model(inputs=inputs, outputs=out_layer, name='resUnet3d')
def create_unet_model_3d(input_image_size,
                         number_of_outputs=2,
                         number_of_layers=4,
                         number_of_filters_at_base_layer=32,
                         convolution_kernel_size=(3, 3, 3),
                         deconvolution_kernel_size=(2, 2, 2),
                         pool_size=(2, 2, 2),
                         strides=(2, 2, 2),
                         dropout_rate=0.0,
                         weight_decay=0.0,
                         add_attention_gating=False,
                         mode='classification'):
    """
    3-D implementation of the U-net deep learning architecture.

    Creates a keras model of the U-net deep learning architecture for image
    segmentation and regression.  More information is provided at the authors'
    website:

            https://lmb.informatik.uni-freiburg.de/people/ronneber/u-net/

    with the paper available here:

            https://arxiv.org/abs/1505.04597

    This particular implementation was influenced by the following python
    implementation:

            https://github.com/joelthelion/ultrasound-nerve-segmentation


    Arguments
    ---------
    input_image_size : tuple of length 4
        Used for specifying the input tensor shape.  The shape (or dimension) of
        that tensor is the image dimensions followed by the number of channels
        (e.g., red, green, and blue).

    number_of_outputs : integer
        Meaning depends on the mode.  For `classification` this is the number of
        segmentation labels.  For `regression` this is the number of outputs.

    number_of_layers : integer
        number of encoding/decoding layers.

    number_of_filters_at_base_layer : integer
        number of filters at the beginning and end of the `U`.  Doubles at each
        descending/ascending layer.

    convolution_kernel_size : tuple of length 3
        Defines the kernel size during the encoding.

    deconvolution_kernel_size : tuple of length 3
        Defines the kernel size during the decoding.

    pool_size : tuple of length 3
        Defines the region for each pooling layer.

    strides : tuple of length 3
        Strides for the convolutional layers.

    dropout_rate : scalar
        Float between 0 and 1 to use between dense layers.

    weight_decay :  scalar
        Weighting parameter for L2 regularization of the kernel weights of the
        convolution layers.  Default = 0.0.

    add_attention_gating :  boolean
        Whether or not to include attention gating.

    mode :  string
        `classification` or `regression`.  Default = `classification`.

    Returns
    -------
    Keras model
        A 3-D keras model defining the U-net network.

    Example
    -------
    >>> model = create_unet_model_3d((128, 128, 128, 1))
    >>> model.summary()
    """
    def attention_gate_3d(x, g, inter_shape):
        # Additive attention gate: project both signals to `inter_shape`
        # channels, combine, squash to a single sigmoid attention map, and
        # use it to reweight `x`.
        x_theta = Conv3D(filters=inter_shape,
                         kernel_size=(1, 1, 1),
                         strides=(1, 1, 1))(x)
        g_phi = Conv3D(filters=inter_shape,
                       kernel_size=(1, 1, 1),
                       strides=(1, 1, 1))(g)
        f = Add()([x_theta, g_phi])
        f = ReLU()(f)
        f_psi = Conv3D(filters=1, kernel_size=(1, 1, 1), strides=(1, 1, 1))(f)
        alpha = Activation('sigmoid')(f_psi)
        attention = multiply([x, alpha])
        return attention

    inputs = Input(shape=input_image_size)

    # Encoding path

    encoding_convolution_layers = []
    pool = None
    for i in range(number_of_layers):
        number_of_filters = number_of_filters_at_base_layer * 2**i

        # The first level reads the raw inputs; deeper levels read the
        # pooled output of the previous level.  (Previously duplicated as
        # two identical Conv3D branches.)
        level_input = inputs if i == 0 else pool
        conv = Conv3D(
            filters=number_of_filters,
            kernel_size=convolution_kernel_size,
            activation='relu',
            padding='same',
            kernel_regularizer=regularizers.l2(weight_decay))(level_input)

        if dropout_rate > 0.0:
            conv = Dropout(rate=dropout_rate)(conv)

        encoding_convolution_layers.append(
            Conv3D(filters=number_of_filters,
                   kernel_size=convolution_kernel_size,
                   activation='relu',
                   padding='same')(conv))

        # No pooling below the bottleneck.
        if i < number_of_layers - 1:
            pool = MaxPooling3D(pool_size=pool_size)(
                encoding_convolution_layers[i])

    # Decoding path

    outputs = encoding_convolution_layers[number_of_layers - 1]
    for i in range(1, number_of_layers):
        number_of_filters = number_of_filters_at_base_layer * 2**(
            number_of_layers - i - 1)
        # Stride-1 transposed convolution followed by upsampling (rather
        # than a strided transposed convolution).
        deconv = Conv3DTranspose(
            filters=number_of_filters,
            kernel_size=deconvolution_kernel_size,
            padding='same',
            kernel_regularizer=regularizers.l2(weight_decay))(outputs)
        deconv = UpSampling3D(size=pool_size)(deconv)

        if add_attention_gating:
            outputs = attention_gate_3d(
                deconv, encoding_convolution_layers[number_of_layers - i - 1],
                number_of_filters // 4)
            outputs = Concatenate(axis=4)([deconv, outputs])
        else:
            outputs = Concatenate(axis=4)([
                deconv, encoding_convolution_layers[number_of_layers - i - 1]
            ])

        outputs = Conv3D(
            filters=number_of_filters,
            kernel_size=convolution_kernel_size,
            activation='relu',
            padding='same',
            kernel_regularizer=regularizers.l2(weight_decay))(outputs)

        if dropout_rate > 0.0:
            outputs = Dropout(rate=dropout_rate)(outputs)

        outputs = Conv3D(
            filters=number_of_filters,
            kernel_size=convolution_kernel_size,
            activation='relu',
            padding='same',
            kernel_regularizer=regularizers.l2(weight_decay))(outputs)

    # Head activation depends on the task mode.
    if mode == 'classification':
        convActivation = 'softmax'
    elif mode == 'regression':
        convActivation = 'linear'
    else:
        raise ValueError(
            'mode must be either `classification` or `regression`.')

    outputs = Conv3D(filters=number_of_outputs,
                     kernel_size=(1, 1, 1),
                     activation=convActivation,
                     kernel_regularizer=regularizers.l2(weight_decay))(outputs)

    unet_model = Model(inputs=inputs, outputs=outputs)

    return unet_model
def get_3DUnetPP(images_x,
                 images_y,
                 images_z,
                 color_type=1,
                 num_class=1,
                 deep_supervision=False):
    """Build a 3-D UNet++ (nested U-Net) segmentation model.

    Parameters
    ----------
    images_x, images_y, images_z : int
        Spatial dimensions of the input volume.  Four 2x pooling stages
        are applied, so each dimension is presumably divisible by 16 —
        TODO confirm with callers.
    color_type : int
        Number of input channels.
    num_class : int
        Number of output channels; each output head uses a sigmoid.
    deep_supervision : bool
        If True the model exposes all four nested decoder outputs
        (output_1..output_4); otherwise only the deepest, output_4.

    Returns
    -------
    Keras model.
    """
    nb_filter = [32, 64, 128, 256, 512]

    # Handle dimension ordering for different backends.  Uses the modern
    # image_data_format() API instead of the deprecated image_dim_ordering().
    global bn_axis
    if K.image_data_format() == 'channels_last':
        bn_axis = -1
        img_input = Input(shape=(images_x, images_y, images_z, color_type),
                          name='main_input')
    else:
        bn_axis = 1
        img_input = Input(shape=(color_type, images_x, images_y, images_z),
                          name='main_input')

    # Backbone column X_{i,1} and nested decoder nodes X_{i,j}: each node
    # up-convolves the node below and concatenates every same-level
    # predecessor (dense skip pathways of UNet++).
    conv1_1 = standard_unit(img_input, stage='11', nb_filter=nb_filter[0])
    pool1 = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), name='pool1')(conv1_1)

    conv2_1 = standard_unit(pool1, stage='21', nb_filter=nb_filter[1])
    pool2 = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), name='pool2')(conv2_1)

    up1_2 = Conv3DTranspose(nb_filter[0], (2, 2, 2),
                            strides=(2, 2, 2),
                            name='up12',
                            padding='same')(conv2_1)
    conv1_2 = concatenate([up1_2, conv1_1], name='merge12', axis=bn_axis)
    conv1_2 = standard_unit(conv1_2, stage='12', nb_filter=nb_filter[0])

    conv3_1 = standard_unit(pool2, stage='31', nb_filter=nb_filter[2])
    pool3 = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), name='pool3')(conv3_1)

    up2_2 = Conv3DTranspose(nb_filter[1], (2, 2, 2),
                            strides=(2, 2, 2),
                            name='up22',
                            padding='same')(conv3_1)
    conv2_2 = concatenate([up2_2, conv2_1], name='merge22', axis=bn_axis)
    conv2_2 = standard_unit(conv2_2, stage='22', nb_filter=nb_filter[1])

    up1_3 = Conv3DTranspose(nb_filter[0], (2, 2, 2),
                            strides=(2, 2, 2),
                            name='up13',
                            padding='same')(conv2_2)
    conv1_3 = concatenate([up1_3, conv1_1, conv1_2],
                          name='merge13',
                          axis=bn_axis)
    conv1_3 = standard_unit(conv1_3, stage='13', nb_filter=nb_filter[0])

    conv4_1 = standard_unit(pool3, stage='41', nb_filter=nb_filter[3])
    pool4 = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), name='pool4')(conv4_1)

    up3_2 = Conv3DTranspose(nb_filter[2], (2, 2, 2),
                            strides=(2, 2, 2),
                            name='up32',
                            padding='same')(conv4_1)
    conv3_2 = concatenate([up3_2, conv3_1], name='merge32', axis=bn_axis)
    conv3_2 = standard_unit(conv3_2, stage='32', nb_filter=nb_filter[2])

    up2_3 = Conv3DTranspose(nb_filter[1], (2, 2, 2),
                            strides=(2, 2, 2),
                            name='up23',
                            padding='same')(conv3_2)
    conv2_3 = concatenate([up2_3, conv2_1, conv2_2],
                          name='merge23',
                          axis=bn_axis)
    conv2_3 = standard_unit(conv2_3, stage='23', nb_filter=nb_filter[1])

    up1_4 = Conv3DTranspose(nb_filter[0], (2, 2, 2),
                            strides=(2, 2, 2),
                            name='up14',
                            padding='same')(conv2_3)
    conv1_4 = concatenate([up1_4, conv1_1, conv1_2, conv1_3],
                          name='merge14',
                          axis=bn_axis)
    conv1_4 = standard_unit(conv1_4, stage='14', nb_filter=nb_filter[0])

    conv5_1 = standard_unit(pool4, stage='51', nb_filter=nb_filter[4])

    up4_2 = Conv3DTranspose(nb_filter[3], (2, 2, 2),
                            strides=(2, 2, 2),
                            name='up42',
                            padding='same')(conv5_1)
    conv4_2 = concatenate([up4_2, conv4_1], name='merge42', axis=bn_axis)
    conv4_2 = standard_unit(conv4_2, stage='42', nb_filter=nb_filter[3])

    up3_3 = Conv3DTranspose(nb_filter[2], (2, 2, 2),
                            strides=(2, 2, 2),
                            name='up33',
                            padding='same')(conv4_2)
    conv3_3 = concatenate([up3_3, conv3_1, conv3_2],
                          name='merge33',
                          axis=bn_axis)
    conv3_3 = standard_unit(conv3_3, stage='33', nb_filter=nb_filter[2])

    up2_4 = Conv3DTranspose(nb_filter[1], (2, 2, 2),
                            strides=(2, 2, 2),
                            name='up24',
                            padding='same')(conv3_3)
    conv2_4 = concatenate([up2_4, conv2_1, conv2_2, conv2_3],
                          name='merge24',
                          axis=bn_axis)
    conv2_4 = standard_unit(conv2_4, stage='24', nb_filter=nb_filter[1])

    up1_5 = Conv3DTranspose(nb_filter[0], (2, 2, 2),
                            strides=(2, 2, 2),
                            name='up15',
                            padding='same')(conv2_4)
    conv1_5 = concatenate([up1_5, conv1_1, conv1_2, conv1_3, conv1_4],
                          name='merge15',
                          axis=bn_axis)
    conv1_5 = standard_unit(conv1_5, stage='15', nb_filter=nb_filter[0])

    # One sigmoid 1x1x1 head per nested decoder column (deep supervision).
    nestnet_output_1 = Conv3D(num_class, (1, 1, 1),
                              activation='sigmoid',
                              name='output_1',
                              kernel_initializer='he_normal',
                              padding='same',
                              kernel_regularizer=l2(1e-4))(conv1_2)
    nestnet_output_2 = Conv3D(num_class, (1, 1, 1),
                              activation='sigmoid',
                              name='output_2',
                              kernel_initializer='he_normal',
                              padding='same',
                              kernel_regularizer=l2(1e-4))(conv1_3)
    nestnet_output_3 = Conv3D(num_class, (1, 1, 1),
                              activation='sigmoid',
                              name='output_3',
                              kernel_initializer='he_normal',
                              padding='same',
                              kernel_regularizer=l2(1e-4))(conv1_4)
    nestnet_output_4 = Conv3D(num_class, (1, 1, 1),
                              activation='sigmoid',
                              name='output_4',
                              kernel_initializer='he_normal',
                              padding='same',
                              kernel_regularizer=l2(1e-4))(conv1_5)

    # Use the Keras 2 `inputs=`/`outputs=` keywords, consistent with the
    # other model constructors in this file (the old `input=`/`output=`
    # spelling was removed from modern Keras).
    if deep_supervision:
        model = Model(inputs=img_input,
                      outputs=[
                          nestnet_output_1, nestnet_output_2, nestnet_output_3,
                          nestnet_output_4
                      ])
    else:
        model = Model(inputs=img_input, outputs=[nestnet_output_4])

    return model
Exemple #28
0
def load_model(input_shape, num_labels, axis=-1, base_filter=32, depth_size=4, se_res_block=True, se_ratio=16,
               noise=0.1, last_relu=False, atten_gate=False):
    """Build a 3-D U-Net with instance normalization and optional
    squeeze-and-excitation residual blocks / attention-gated skips.

    Parameters
    ----------
    input_shape : tuple
        Input volume shape including the channel axis.
    num_labels : int
        Number of output channels (sigmoid-activated segmentation map).
    axis : int
        Channel axis for InstanceNormalization at the outermost layers.
        NOTE(review): the nested conv3d/deconv3d helpers declare their own
        `axis=-1` defaults and are called without forwarding this argument,
        so a non-default value only affects the top-level layers — confirm
        whether that is intended.
    base_filter : int
        Filter count at the first level; doubles at each deeper level.
    depth_size : int
        Encoder/decoder depth; must be 3 or 4.
    se_res_block : bool
        Use squeeze-and-excitation residual blocks when True.
    se_ratio : int
        Channel-reduction ratio inside the SE blocks.
    noise : float
        Stddev of the GaussianNoise applied to the input (train-time only).
    last_relu : bool
        If True, threshold the sigmoid output at 0.5 via ThresholdedReLU.
    atten_gate : bool
        Apply attention gating to skip connections in the decoder.

    Returns
    -------
    Keras Model mapping the input volume to the segmentation output.
    """
    def conv3d(layer_input, filters, axis=-1, se_res_block=True, se_ratio=16, down_sizing=True):
        # Encoder block: optional 2x max-pool, then two conv+IN layers,
        # optionally wrapped as a squeeze-and-excitation residual block.
        if down_sizing == True:
            layer_input = MaxPooling3D(pool_size=(2, 2, 2))(layer_input)
        d = Conv3D(filters, (3, 3, 3), use_bias=False, padding='same')(layer_input)
        d = InstanceNormalization(axis=axis)(d)
        d = LeakyReLU(alpha=0.3)(d)
        d = Conv3D(filters, (3, 3, 3), use_bias=False, padding='same')(d)
        d = InstanceNormalization(axis=axis)(d)
        if se_res_block == True:
            # Squeeze-and-excitation: global pool -> bottleneck dense pair
            # -> per-channel sigmoid gates broadcast over the volume.
            se = GlobalAveragePooling3D()(d)
            se = Dense(filters // se_ratio, activation='relu')(se)
            se = Dense(filters, activation='sigmoid')(se)
            se = Reshape([1, 1, 1, filters])(se)
            d = Multiply()([d, se])
            # Projection shortcut so the residual add matches `filters`.
            shortcut = Conv3D(filters, (3, 3, 3), use_bias=False, padding='same')(layer_input)
            shortcut = InstanceNormalization(axis=axis)(shortcut)
            d = add([d, shortcut])
        d = LeakyReLU(alpha=0.3)(d)
        return d

    def deconv3d(layer_input, skip_input, filters, axis=-1, se_res_block=True, se_ratio=16, atten_gate=False):
        # Decoder block: optional attention gate on the skip, then
        # pad + transposed-conv upsample, crop-concat, and two conv+IN
        # layers (optionally as an SE residual block).
        if atten_gate == True:
            # Additive attention gate computed at the coarse resolution,
            # then upsampled/cropped back to the skip's resolution.
            gating = Conv3D(filters, (1, 1, 1), use_bias=False, padding='same')(layer_input)
            gating = InstanceNormalization(axis=axis)(gating)
            attention = Conv3D(filters, (2, 2, 2), strides=(2, 2, 2), use_bias=False, padding='valid')(skip_input)
            attention = InstanceNormalization(axis=axis)(attention)
            attention = add([gating, attention])
            attention = Conv3D(1, (1, 1, 1), use_bias=False, padding='same', activation='sigmoid')(attention)
            # attention = Lambda(resize_by_axis, arguments={'dim_1':skip_input.get_shape().as_list()[1],'dim_2':skip_input.get_shape().as_list()[2],'ax':3})(attention) # error when None dimension is feeded.
            # attention = Lambda(resize_by_axis, arguments={'dim_1':skip_input.get_shape().as_list()[1],'dim_2':skip_input.get_shape().as_list()[3],'ax':2})(attention)
            attention = ZeroPadding3D(((0, 1), (0, 1), (0, 1)))(attention)
            attention = UpSampling3D((2, 2, 2))(attention)
            attention = CropToConcat3D(mode='crop')([attention, skip_input])
            # Tile the single-channel attention map across all channels.
            attention = Lambda(lambda x: K.tile(x, [1, 1, 1, 1, filters]))(attention)
            skip_input = multiply([skip_input, attention])

        # Pad one voxel per trailing edge so odd sizes survive the 2x
        # upsample; the crop-concat below trims back to the skip's shape.
        u1 = ZeroPadding3D(((0, 1), (0, 1), (0, 1)))(layer_input)
        u1 = Conv3DTranspose(filters, (2, 2, 2), strides=(2, 2, 2), use_bias=False, padding='same')(u1)
        u1 = InstanceNormalization(axis=axis)(u1)
        u1 = LeakyReLU(alpha=0.3)(u1)
        u1 = CropToConcat3D()([u1, skip_input])
        u2 = Conv3D(filters, (3, 3, 3), use_bias=False, padding='same')(u1)
        u2 = InstanceNormalization(axis=axis)(u2)
        u2 = LeakyReLU(alpha=0.3)(u2)
        u2 = Conv3D(filters, (3, 3, 3), use_bias=False, padding='same')(u2)
        u2 = InstanceNormalization(axis=axis)(u2)
        if se_res_block == True:
            se = GlobalAveragePooling3D()(u2)
            se = Dense(filters // se_ratio, activation='relu')(se)
            se = Dense(filters, activation='sigmoid')(se)
            se = Reshape([1, 1, 1, filters])(se)
            u2 = Multiply()([u2, se])
            shortcut = Conv3D(filters, (3, 3, 3), use_bias=False, padding='same')(u1)
            shortcut = InstanceNormalization(axis=axis)(shortcut)
            u2 = add([u2, shortcut])
        u2 = LeakyReLU(alpha=0.3)(u2)
        return u2

    def CropToConcat3D(mode='concat'):
        # Returns a Lambda layer that crops the first (bigger) input down
        # to the spatial size of the second, then concats/adds/crops per
        # `mode`.  Uses dynamic tf.shape so unknown dimensions work.
        def crop_to_concat_3D(concat_layers, axis=-1):
            bigger_input, smaller_input = concat_layers
            bigger_shape, smaller_shape = tf.shape(bigger_input), \
                                          tf.shape(smaller_input)
            sh, sw, sd = smaller_shape[1], smaller_shape[2], smaller_shape[3]
            bh, bw, bd = bigger_shape[1], bigger_shape[2], bigger_shape[3]
            dh, dw, dd = bh - sh, bw - sw, bd - sd
            # NOTE(review): `[:-d]` with d == 0 would slice to an empty
            # tensor; the upstream ZeroPadding3D appears to guarantee the
            # bigger input is strictly larger on every axis — confirm.
            cropped_to_smaller_input = bigger_input[:, :-dh,
                                       :-dw,
                                       :-dd, :]
            if mode == 'concat':
                return K.concatenate([smaller_input, cropped_to_smaller_input], axis=axis)
            elif mode == 'add':
                return smaller_input + cropped_to_smaller_input
            elif mode == 'crop':
                return cropped_to_smaller_input

        return Lambda(crop_to_concat_3D)

    def resize_by_axis(image, dim_1, dim_2, ax):  # it is available only for 1 channel 3D
        # Bilinear-resizes each 2-D slice along axis `ax`, then restacks.
        # Currently unused (see commented-out attention resize above).
        resized_list = []
        unstack_img_depth_list = tf.unstack(image, axis=ax)
        for i in unstack_img_depth_list:
            resized_list.append(tf.image.resize_images(i, [dim_1, dim_2]))  # defaults to ResizeMethod.BILINEAR
        stack_img = tf.stack(resized_list, axis=ax + 1)
        return stack_img

    # --- Encoder ---
    input_img = Input(shape=input_shape, name='Input')
    d0 = GaussianNoise(noise)(input_img)
    d1 = Conv3D(base_filter, (3, 3, 3), use_bias=False, padding='same')(d0)
    d1 = InstanceNormalization(axis=axis)(d1)
    d1 = LeakyReLU(alpha=0.3)(d1)
    d2 = conv3d(d1, base_filter * 2, se_res_block=se_res_block)
    d3 = conv3d(d2, base_filter * 4, se_res_block=se_res_block)
    d4 = conv3d(d3, base_filter * 8, se_res_block=se_res_block)

    # --- Bottleneck + decoder (depth 4 adds one extra level) ---
    if depth_size == 4:
        d5 = conv3d(d4, base_filter * 16, se_res_block=se_res_block)
        u4 = deconv3d(d5, d4, base_filter * 8, se_res_block=se_res_block, atten_gate=atten_gate)
        u3 = deconv3d(u4, d3, base_filter * 4, se_res_block=se_res_block, atten_gate=atten_gate)
    elif depth_size == 3:
        u3 = deconv3d(d4, d3, base_filter * 4, se_res_block=se_res_block, atten_gate=atten_gate)
    else:
        raise Exception('depth size must be 3 or 4. you put ', depth_size)

    u2 = deconv3d(u3, d2, base_filter * 2, se_res_block=se_res_block, atten_gate=atten_gate)
    # Final upsample back to input resolution (plain, no SE/attention).
    u1 = ZeroPadding3D(((0, 1), (0, 1), (0, 1)))(u2)
    u1 = Conv3DTranspose(base_filter, (2, 2, 2), strides=(2, 2, 2), use_bias=False, padding='same')(u1)
    u1 = InstanceNormalization(axis=axis)(u1)
    u1 = LeakyReLU(alpha=0.3)(u1)
    u1 = CropToConcat3D()([u1, d1])
    u1 = Conv3D(base_filter, (3, 3, 3), use_bias=False, padding='same')(u1)
    u1 = InstanceNormalization(axis=axis)(u1)
    u1 = LeakyReLU(alpha=0.3)(u1)
    output_img = Conv3D(num_labels, kernel_size=1, strides=1, padding='same', activation='sigmoid')(u1)
    if last_relu == True:
        # Binarize: zero out sigmoid activations at or below 0.5.
        output_img = ThresholdedReLU(theta=0.5)(output_img)
    model = Model(inputs=input_img, outputs=output_img)
    return model
Exemple #29
0
    def build_model(self,
                    img_shape=(32, 168, 168),
                    learning_rate=5e-5,
                    gpu_id=None,
                    nb_gpus=None,
                    trained_model=None,
                    temp=1.0):
        """Build and compile the semi-supervised 5-zone segmentation U-Net.

        The network takes three inputs (the image, the ensemble/unsupervised
        label map, and a per-voxel supervised flag) and produces one softmax
        slice per anatomical zone (pz, cz, us, afs, bg).

        Parameters
        ----------
        img_shape : tuple
            Spatial shape of the input volume (no channel axis).
        learning_rate : float
            Learning rate for the AdamWithWeightnorm optimizer.
        gpu_id : str or None
            Device string used only in the multi-GPU path.
        nb_gpus : int or None
            If None, a single-device model is compiled; otherwise the model
            is wrapped with multi_gpu_model(gpus=nb_gpus).
        trained_model : str or None
            Optional weights file loaded by name before compiling.
        temp : float
            Softmax temperature; logits are divided by this value.

        Returns
        -------
        Compiled Keras model (the multi-GPU wrapper when nb_gpus is set).
        """
        input_img = Input((*img_shape, 1), name='img_inp')
        unsupervised_label = Input((*img_shape, 5), name='unsup_label_inp')
        supervised_flag = Input(shape=img_shape, name='flag_inp')

        kernel_init = 'he_normal'
        sfs = 16  # start filter size
        bn = True
        do = True

        # ---- Encoder ----
        conv1, conv1_b_m = self.downLayer(input_img, sfs, 1, bn)
        conv2, conv2_b_m = self.downLayer(conv1, sfs * 2, 2, bn)

        conv3 = Conv3D(sfs * 4, (3, 3, 3),
                       activation='relu',
                       padding='same',
                       kernel_initializer=kernel_init,
                       name='conv' + str(3) + '_1')(conv2)
        if bn:
            conv3 = BatchNormalization()(conv3)
        conv3 = Conv3D(sfs * 8, (3, 3, 3),
                       activation='relu',
                       padding='same',
                       kernel_initializer=kernel_init,
                       name='conv' + str(3) + '_2')(conv3)
        if bn:
            conv3 = BatchNormalization()(conv3)
        pool3 = MaxPooling3D(pool_size=(2, 2, 2))(conv3)
        # conv3, conv3_b_m = downLayer(conv2, sfs*4, 3, bn)

        # ---- Bottleneck (dropout kept active at inference: training=True) ----
        conv4 = Conv3D(sfs * 16, (3, 3, 3),
                       activation='relu',
                       padding='same',
                       kernel_initializer=kernel_init,
                       name='conv4_1')(pool3)
        if bn:
            conv4 = BatchNormalization()(conv4)
        if do:
            conv4 = Dropout(0.5, seed=4,
                            name='Dropout_' + str(4))(conv4, training=True)
        conv4 = Conv3D(sfs * 16, (3, 3, 3),
                       activation='relu',
                       padding='same',
                       kernel_initializer=kernel_init,
                       name='conv4_2')(conv4)
        if bn:
            conv4 = BatchNormalization()(conv4)

        # ---- Decoder ----
        # conv5 = upLayer(conv4, conv3_b_m, sfs*16, 5, bn, do)
        up1 = Conv3DTranspose(sfs * 16, (2, 2, 2),
                              strides=(2, 2, 2),
                              activation='relu',
                              padding='same',
                              name='up' + str(5))(conv4)
        up1 = concatenate([up1, conv3])
        conv5 = Conv3D(int(sfs * 8), (3, 3, 3),
                       activation='relu',
                       padding='same',
                       kernel_initializer=kernel_init,
                       name='conv' + str(5) + '_1')(up1)
        if bn:
            conv5 = BatchNormalization()(conv5)
        if do:
            conv5 = Dropout(0.5, seed=5,
                            name='Dropout_' + str(5))(conv5, training=True)
        conv5 = Conv3D(int(sfs * 8), (3, 3, 3),
                       activation='relu',
                       padding='same',
                       kernel_initializer=kernel_init,
                       name='conv' + str(5) + '_2')(conv5)
        if bn:
            conv5 = BatchNormalization()(conv5)

        conv6 = self.upLayer(conv5, conv2_b_m, sfs * 8, 6, bn, do)
        conv7 = self.upLayer(conv6, conv1_b_m, sfs * 4, 7, bn, do)

        # ---- Temperature-scaled softmax head, split per zone ----
        conv_out = Conv3D(5, (1, 1, 1), name='conv_final_softmax')(conv7)
        conv_out = Lambda(lambda x: x / temp)(conv_out)
        conv_out_sm = Activation('softmax')(conv_out)

        pz_sm_out = Lambda(lambda x: x[:, :, :, :, 0], name='pz')(conv_out_sm)
        cz_sm_out = Lambda(lambda x: x[:, :, :, :, 1], name='cz')(conv_out_sm)
        us_sm_out = Lambda(lambda x: x[:, :, :, :, 2], name='us')(conv_out_sm)
        afs_sm_out = Lambda(lambda x: x[:, :, :, :, 3],
                            name='afs')(conv_out_sm)
        bg_sm_out = Lambda(lambda x: x[:, :, :, :, 4], name='bg')(conv_out_sm)

        pz_ensemble_pred = Lambda(lambda x: x[:, :, :, :, 0],
                                  name='pzu')(unsupervised_label)
        cz_ensemble_pred = Lambda(lambda x: x[:, :, :, :, 1],
                                  name='czu')(unsupervised_label)
        us_ensemble_pred = Lambda(lambda x: x[:, :, :, :, 2],
                                  name='usu')(unsupervised_label)
        afs_ensemble_pred = Lambda(lambda x: x[:, :, :, :, 3],
                                   name='afsu')(unsupervised_label)
        bg_ensemble_pred = Lambda(lambda x: x[:, :, :, :, 4],
                                  name='bgu')(unsupervised_label)

        # Stack each zone's ensemble prediction with the supervised flag so
        # the loss/metric closures can separate labelled from unlabelled voxels.
        pz = K.stack([pz_ensemble_pred, supervised_flag])
        cz = K.stack([cz_ensemble_pred, supervised_flag])
        us = K.stack([us_ensemble_pred, supervised_flag])
        afs = K.stack([afs_ensemble_pred, supervised_flag])
        bg = K.stack([bg_ensemble_pred, supervised_flag])

        optimizer = AdamWithWeightnorm(lr=learning_rate,
                                       beta_1=0.9,
                                       beta_2=0.999)
        # optimizer = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999)

        # Build the compile configuration ONCE and share it between the
        # single- and multi-GPU paths.  Previously the two paths carried
        # duplicated literal dicts that had drifted apart: the multi-GPU
        # metrics used unsup_dice_tb(us, 1)/(afs, 1) while the single-GPU
        # path (matching the us/afs loss weights of 2) used 2.
        losses = {
            'pz': self.semi_supervised_loss(pz, unsup_loss_class_wt=1),
            'cz': self.semi_supervised_loss(cz, 1),
            'us': self.semi_supervised_loss(us, 2),
            'afs': self.semi_supervised_loss(afs, 2),
            'bg': self.semi_supervised_loss(bg, 1),
        }
        metrics = {
            'pz': [
                self.dice_coef,
                self.unsup_dice_tb(pz, 1),
                self.dice_tb(pz, 1)
            ],
            'cz': [
                self.dice_coef,
                self.unsup_dice_tb(cz, 1),
                self.dice_tb(cz, 1)
            ],
            'us': [
                self.dice_coef,
                self.unsup_dice_tb(us, 2),
                self.dice_tb(us, 2)
            ],
            'afs': [
                self.dice_coef,
                self.unsup_dice_tb(afs, 2),
                self.dice_tb(afs, 2)
            ],
            'bg': [
                self.dice_coef,
                self.unsup_dice_tb(bg, 1),
                self.dice_tb(bg, 1)
            ],
        }
        loss_weights = {'pz': 1, 'cz': 1, 'us': 2, 'afs': 2, 'bg': 1}

        model_inputs = [input_img, unsupervised_label, supervised_flag]
        model_outputs = [pz_sm_out, cz_sm_out, us_sm_out, afs_sm_out, bg_sm_out]

        if nb_gpus is None:
            p_model = Model(model_inputs, model_outputs)
            if trained_model is not None:
                p_model.load_weights(trained_model, by_name=True)

            p_model.compile(optimizer=optimizer,
                            loss=losses,
                            metrics=metrics,
                            loss_weights=loss_weights)
        else:
            with tf.device(gpu_id):
                model = Model(model_inputs, model_outputs)
                if trained_model is not None:
                    model.load_weights(trained_model, by_name=True)

                p_model = multi_gpu_model(model, gpus=nb_gpus)
                p_model.compile(optimizer=optimizer,
                                loss=losses,
                                metrics=metrics,
                                loss_weights=loss_weights)

        return p_model
def get_net_multiPlane():
    """Build a three-branch (tra/cor/sag) 3D U-Net-style segmentation model.

    Each 168x168x168 single-channel input runs through its own 3-level
    encoder (8->16, 16->32, 32->64 filters with 2x2x2 max-pooling).  The
    three bottleneck feature maps are fused by channel concatenation and
    decoded by a shared 3-level up-sampling path that receives skip
    connections from all three encoders at every resolution.  The output
    is a single-channel sigmoid volume at full input resolution.

    Returns:
        keras Model with inputs [inputs_tra, inputs_sag, inputs_cor] and
        a single sigmoid output.

    NOTE(review): the Model's declared input order is [tra, sag, cor],
    which differs from the tra/cor/sag order used while wiring the graph.
    Callers must supply data in the declared [tra, sag, cor] order —
    confirm this matches the data pipeline.
    """
    filter_factor = 1

    def conv_relu(tensor, n_filters):
        # 3x3x3 same-padded ReLU convolution (the only conv flavour used).
        return Conv3D(n_filters * filter_factor, (3, 3, 3),
                      activation='relu',
                      padding='same')(tensor)

    def encoder_branch(tensor):
        # Three encoder levels; returns the skip tensors for each level
        # plus the final down-sampled feature map for the bottleneck.
        skip1 = conv_relu(conv_relu(tensor, 8), 16)
        down1 = MaxPooling3D(pool_size=(2, 2, 2))(skip1)
        skip2 = conv_relu(conv_relu(down1, 16), 32)
        down2 = MaxPooling3D(pool_size=(2, 2, 2))(skip2)
        skip3 = conv_relu(conv_relu(down2, 32), 64)
        down3 = MaxPooling3D(pool_size=(2, 2, 2))(skip3)
        return skip1, skip2, skip3, down3

    # One independent encoder per imaging plane.
    inputs_tra = Input((168, 168, 168, 1))
    skip1_tra, skip2_tra, skip3_tra, bottom_tra = encoder_branch(inputs_tra)

    inputs_cor = Input((168, 168, 168, 1))
    skip1_cor, skip2_cor, skip3_cor, bottom_cor = encoder_branch(inputs_cor)

    inputs_sag = Input((168, 168, 168, 1))
    skip1_sag, skip2_sag, skip3_sag, bottom_sag = encoder_branch(inputs_sag)

    # Fuse the three bottlenecks and compress channels.
    fused = concatenate([bottom_tra, bottom_cor, bottom_sag])
    bottleneck = conv_relu(conv_relu(fused, 192), 128)

    def up_stage(tensor, up_filters, conv_filters, skips):
        # Transposed-conv upsample (note: filter count deliberately NOT
        # scaled by filter_factor, matching the original network), then
        # concatenate skips from all three encoders and refine twice.
        upsampled = Conv3DTranspose(up_filters, (2, 2, 2),
                                    strides=(2, 2, 2),
                                    activation='relu',
                                    padding='same')(tensor)
        merged = concatenate([upsampled] + skips)
        return conv_relu(conv_relu(merged, conv_filters), conv_filters)

    decoded3 = up_stage(bottleneck, 128, 64,
                        [skip3_tra, skip3_cor, skip3_sag])
    decoded2 = up_stage(decoded3, 64, 32,
                        [skip2_tra, skip2_cor, skip2_sag])
    decoded1 = up_stage(decoded2, 32, 16,
                        [skip1_tra, skip1_cor, skip1_sag])

    # Per-voxel sigmoid prediction.
    segmentation = Conv3D(1, (1, 1, 1), activation='sigmoid')(decoded1)

    return Model(inputs=[inputs_tra, inputs_sag, inputs_cor],
                 outputs=[segmentation])