def squeeze_excite_block_3D(input_tensor1, input_tensor2, ratio=16):
    """ Create a channel-wise squeeze-excite block
    Args:
        input_tensor: input Keras tensor
        ratio: number of output filters
    Returns: a Keras tensor
    References
    -   [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    """
    filters = input_tensor1._keras_shape[-1]
    se_shape = (1, 1, 1, filters)

    # Squeeze: global-average-pool each input, then fuse the two channel descriptors
    se_gap1 = GlobalAveragePooling3D()(input_tensor1)
    se_gap2 = GlobalAveragePooling3D()(input_tensor2)
    merged = add([se_gap1, se_gap2])

    # Excitation branch for input_tensor1
    se1 = Reshape(se_shape)(merged)
    se1 = Dense(filters // ratio,
                activation='relu',
                kernel_initializer='he_normal',
                use_bias=False)(se1)
    se1 = Dense(filters,
                activation='sigmoid',
                kernel_initializer='he_normal',
                use_bias=False)(se1)

    # Excitation branch for input_tensor2
    se2 = Reshape(se_shape)(merged)
    se2 = Dense(filters // ratio,
                activation='relu',
                kernel_initializer='he_normal',
                use_bias=False)(se2)
    se2 = Dense(filters,
                activation='sigmoid',
                kernel_initializer='he_normal',
                use_bias=False)(se2)

    # Scale each input by its own channel attention
    t1_se = multiply([input_tensor1, se1])
    t2_se = multiply([input_tensor2, se2])

    return t1_se, t2_se
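This block targets the pre-TF2 Keras functional API (input_tensor._keras_shape was removed in later releases). A minimal sketch of the imports it assumes:

from keras.layers import Dense, GlobalAveragePooling3D, Reshape, add, multiply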
Example #2
def resnet_3d(input_shape=(5, 256, 256, 1), n_classes=10, depth=18):

    inpt = Input(input_shape)

    # conv1 stem: 7x7x7 conv, strides (1, 2, 2)
    x = ConvBN(inpt,
               64,
               7,
               strides=(1, 2, 2),
               padding='same',
               activation='relu')
    x = MaxPooling3D(pool_size=2, strides=2, padding='same')(x)

    num_blocks = n_blocks[depth]
    # conv2
    x = resBlock(x, n_filters[0], strides=(1, 2, 2), n_blocks=num_blocks[0])

    # conv3
    x = resBlock(x, n_filters[1], strides=(1, 2, 2), n_blocks=num_blocks[1])

    # conv4
    x = resBlock(x, n_filters[2], strides=(2, 2, 2), n_blocks=num_blocks[2])

    # conv5
    x = resBlock(x, n_filters[3], strides=(2, 2, 2), n_blocks=num_blocks[3])

    # head
    x = GlobalAveragePooling3D()(x)
    x = Dense(400, activation='relu')(x)
    x = Dense(n_classes, activation='softmax')(x)

    # model
    model = Model(inpt, x)

    return model
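resnet_3d depends on module-level tables n_blocks and n_filters, plus ConvBN and resBlock helpers defined elsewhere in the source file. A hedged sketch of plausible table values, following the standard ResNet stage configurations (illustrative, not the author's actual values):

# Assumed stage configuration (standard ResNet depths; values are illustrative)
n_filters = [64, 128, 256, 512]    # filters for conv2..conv5
n_blocks = {
    18: [2, 2, 2, 2],              # ResNet-18
    34: [3, 4, 6, 3],              # ResNet-34
}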
def get_DenseNet_classifier():
    inputs = Input((CLASSIFY_INPUT_WIDTH, CLASSIFY_INPUT_HEIGHT,
                    CLASSIFY_INPUT_DEPTH, CLASSIFY_INPUT_CHANNEL))
    x = Conv3D(DENSE_NET_INITIAL_CONV_DIM, (3, 3, 3), padding='same')(inputs)
    print('input')
    print(x.get_shape())

    for i in range(DENSE_NET_BLOCKS):
        x = dense_block(x)
        if i != DENSE_NET_BLOCKS - 1:
            x = transition_block(x)

    print('top')
    x = GlobalAveragePooling3D()(x)
    print(x.get_shape())

    if DENSE_NET_ENABLE_DROPOUT:
        x = Dropout(DENSE_NET_DROPOUT)(x)

    x = Dense(2, activation='softmax')(x)
    print(x.get_shape())

    model = Model(inputs=inputs, outputs=x)
    model.compile(optimizer=Adam(lr=TRAIN_CLASSIFY_LEARNING_RATE),
                  # 2-way softmax output, so use categorical (not binary) crossentropy
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
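The upper-case names are project-level configuration constants. Hypothetical values, purely to make the snippet self-contained (the real ones live in the project's config module):

# Hypothetical configuration constants
CLASSIFY_INPUT_WIDTH = CLASSIFY_INPUT_HEIGHT = CLASSIFY_INPUT_DEPTH = 32
CLASSIFY_INPUT_CHANNEL = 1
DENSE_NET_INITIAL_CONV_DIM = 16
DENSE_NET_BLOCKS = 3
DENSE_NET_ENABLE_DROPOUT = True
DENSE_NET_DROPOUT = 0.5
TRAIN_CLASSIFY_LEARNING_RATE = 1e-4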
Example #4
def create_second_model(patch_size=(40, 40, 40), drop_rate=0.2):
    if not isinstance(patch_size, tuple):
        patch_size = (patch_size, patch_size, patch_size)

    m_in = Input(shape=(*patch_size, 1))

    x = Conv3D(64, 3, activation='relu', kernel_initializer='he_normal')(m_in)
    x = Conv3D(64, 3, activation='relu', kernel_initializer='he_normal')(x)
    x = MaxPooling3D()(x)
    x = Dropout(drop_rate)(x)

    x = Conv3D(128, 3, activation='relu', kernel_initializer='he_normal')(x)
    x = Conv3D(128, 3, activation='relu', kernel_initializer='he_normal')(x)
    x = MaxPooling3D()(x)
    x = Dropout(drop_rate)(x)

    x = Conv3D(256, 3, activation='relu', kernel_initializer='he_normal')(x)
    x = Conv3D(256, 3, activation='relu', kernel_initializer='he_normal')(x)
    x = MaxPooling3D()(x)
    x = Dropout(drop_rate)(x)

    x = GlobalAveragePooling3D()(x)
    x = Dense(1024, activation='relu', kernel_initializer='he_normal')(x)
    x = Dropout(drop_rate)(x)
    x = Dense(512, activation='relu', kernel_initializer='he_normal')(x)
    x = Dropout(drop_rate)(x)
    m_out = Dense(2, activation='softmax', kernel_initializer='he_normal')(x)

    model = Model(m_in, m_out)
    return model
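A minimal compile-and-inspect sketch for the model above (the optimizer and loss here are illustrative choices, not from the source):

model = create_second_model(patch_size=40)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()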
def disc_net():
    stride = 2
    input_layer = Input(shape=(data_shape[0], data_shape[1], data_shape[2], 1))

    num_filters_start = 32

    nb_conv = 5
    filters_list = [num_filters_start * min(8, (2**i)) for i in range(nb_conv)]

    disc_out = Conv3D(num_filters_start,
                      kernel_size=3,
                      strides=stride,
                      padding='same',
                      name='disc_conv_1')(input_layer)
    disc_out = LeakyReLU(alpha=0.2)(disc_out)

    for i, filter_size in enumerate(filters_list[1:]):
        name = 'disc_conv_{}'.format(i + 2)

        disc_out = Conv3D(filter_size,
                          kernel_size=3,
                          strides=stride,
                          padding='same',
                          name=name)(disc_out)
        disc_out = BatchNormalization(name=name + '_bn')(disc_out)
        disc_out = LeakyReLU(alpha=0.2)(disc_out)

    disc_out = GlobalAveragePooling3D()(disc_out)

    disc_out = Dense(1, activation='sigmoid', name="disc_dense")(disc_out)

    dis_model = Model(inputs=input_layer, outputs=disc_out, name="patch_gan")

    return dis_model
Example #6
    def affine(self, fixed, moving):

        outputs = Concatenate(axis=-1, name='affine_concat')([fixed, moving])
        features = 16
        while K.int_shape(outputs)[1] > 7:
            outputs = self.convolution(outputs,
                                       n_filters=features,
                                       pool='down',
                                       name='affine_block' + str(features))
            features = features * 2
        outputs = GlobalAveragePooling3D(name='affine_gpool')(outputs)
        outputs = Dense(1024, activation='relu', name='fc')(outputs)

        # build affine transform matrix
        W = Dense(9, name='affine_W')(outputs)
        W = Reshape((1, 3, 3, 1), name='affine_W_reshape')(W)
        b = Dense(3, name='affine_b')(outputs)
        b = Reshape((1, 3, 1, 1), name='affine_b_reshape')(b)

        # apply transform
        concat_fn = lambda x: K.squeeze(K.squeeze(K.concatenate(x, 3), 1),
                                        axis=-1)
        transform = Lambda(function=concat_fn, name='A')([W, b])
        warped = SpatialTransformer(name='warpA')([moving, transform])

        return transform, warped
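concat_fn stitches the 3x3 matrix W and the translation b into a single (3, 4) affine transform [W | b]. A small NumPy check of the shape bookkeeping (batch size 1, purely illustrative):

import numpy as np

W = np.zeros((1, 1, 3, 3, 1))              # after affine_W_reshape (with batch axis)
b = np.zeros((1, 1, 3, 1, 1))              # after affine_b_reshape (with batch axis)
A = np.concatenate([W, b], axis=3)         # (1, 1, 3, 4, 1)
A = np.squeeze(np.squeeze(A, 1), axis=-1)  # (1, 3, 4): per-sample [W | b]
print(A.shape)                             # (1, 3, 4)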
Example #7
def squeeze_excite_block(input, ratio=16):
    ''' Create a channel-wise squeeze-excite block

    Args:
        input: input tensor
        ratio: reduction ratio for the excitation bottleneck

    Returns: a Keras tensor

    References
    -   [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    '''
    init = input
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    filters = init._keras_shape[channel_axis]
    se_shape = (1, 1, 1, filters)

    se = GlobalAveragePooling3D()(init)
    se = Reshape(se_shape)(se)
    se = Dense(filters // ratio,
               activation='relu',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = Dense(filters,
               activation='sigmoid',
               kernel_initializer='he_normal',
               use_bias=False)(se)

    if K.image_data_format() == 'channels_first':
        se = Permute((4, 1, 2, 3))(se)

    x = multiply([init, se])
    return x
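A minimal usage sketch, assuming the block above is in scope and a Keras version that still exposes ._keras_shape:

from keras.layers import Input

inp = Input((16, 64, 64, 32))              # hypothetical (D, H, W, C) feature map
out = squeeze_excite_block(inp, ratio=16)  # same shape, channels rescaled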
Example #8
def get_3d_model3(input_shape=(64, 64, 64, 1), NUM_CLASSES=2):
    ## input layer
    input_layer = Input(input_shape)

    ## convolutional layers

    conv_layer1 = my_conv3d(input_layer, filters=32, kernal_size=(7, 7, 7), strides=2, padding='same')

    conv_layer2 = my_conv3d(conv_layer1, filters=32, kernal_size=(5, 5, 5), padding='same')

    conv_layer3 = my_conv3d(conv_layer2, filters=32, kernal_size=(3, 3, 3), padding='same')

    conv_layer4 = my_conv3d(conv_layer3, filters=32, kernal_size=(3, 3, 3), padding='same')

    conv_layer5 = my_conv3d(conv_layer4, filters=32, kernal_size=(3, 3, 3), padding='same')

    GAP = GlobalAveragePooling3D(name='avg_pool')(conv_layer5)

    output_layer = Dense(units=NUM_CLASSES, activation='softmax')(GAP)

    ## define the model with input layer and output layer
    model = Model(inputs=input_layer, outputs=output_layer)

    return model
Example #9
def run(method, nrows, epochs, optimizer, trainable):
    # df, classes = getTrainNodules(TRAIN_NODULES_PATH, nrows = nrows)
    # train, valid = splitData(df, shuffle=True)
    # training_generator, validation_generator = getDataGenerators(train, valid, classes, method=method, batch_size=BATCH_SIZE)

    NAME = '{}_i3d'.format(method)
    _, classes = getTrainNodules(TRAIN_NODULES_PATH, nrows=None)

    # Load the I3D model
    base_model = I3D(weights='rgb_imagenet_and_kinetics',
                     include_top=False,
                     input_shape=(IMAGE_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3),
                     dropout_prob=DROPOUT_PROB)

    if not trainable:
        for layer in base_model.layers:
            layer.trainable = False

    # Add classification layers
    x = base_model.output
    x = GlobalAveragePooling3D()(x)
    x = Dropout(DROPOUT_PROB)(x)
    x = Dense(1024, activation='relu')(x)
    predictions = Dense(len(classes), activation='softmax')(x)

    opt = 'adam' if optimizer == 'adam' else SGD()

    model = Model(inputs=base_model.input, outputs=predictions)
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    print("Final Model summary")
    model.summary()

    callbacks = [
        CSVLogger('results/' + NAME + '.csv', append=True, separator=',')
    ]

    print()
    model.save_weights('weights/' + NAME + '_initial.h5')
    for fold in range(0, NUM_FOLDS):
        train, valid = getFoldNodules(nrows=nrows, fold=fold, shuffle=True)
        training_generator, validation_generator = getDataGenerators(
            train, valid, classes, method=method, batch_size=BATCH_SIZE)

        # Fit model
        print('Fold', fold)
        model.fit_generator(generator=training_generator,
                            steps_per_epoch=ceil(train[0].size / BATCH_SIZE),
                            validation_data=validation_generator,
                            validation_steps=ceil(valid[0].size / BATCH_SIZE),
                            epochs=epochs,
                            callbacks=callbacks,
                            shuffle=True,
                            verbose=1)
        model.load_weights('weights/' + NAME + '_initial.h5')
        print()

    model.save('weights/' + NAME + '.h5')
Example #10
    def deconv3d(layer_input, skip_input, filters, axis=-1, se_res_block=True, se_ratio=16):
        u1 = ZeroPadding3D(((0, 1), (0, 1), (0, 1)))(layer_input)
        u1 = Conv3DTranspose(filters, (2, 2, 2), strides=(2, 2, 2), use_bias=False, padding='same')(u1)
        u1 = InstanceNormalization(axis=axis)(u1)
        u1 = Activation('selu')(u1)
        u1 = CropToConcat3D()([u1, skip_input])
        u2 = Conv3D(filters, (3, 3, 3), use_bias=False, padding='same')(u1)
        u2 = InstanceNormalization(axis=axis)(u2)
        u2 = Activation('selu')(u2)
        u2 = Conv3D(filters, (3, 3, 3), use_bias=False, padding='same')(u2)
        u2 = InstanceNormalization(axis=axis)(u2)
        if se_res_block:
            # Squeeze-and-excitation on the decoder features, plus a residual shortcut
            se = GlobalAveragePooling3D()(u2)
            se = Dense(filters // se_ratio, activation='relu')(se)
            se = Dense(filters, activation='sigmoid')(se)
            se = Reshape([1, 1, 1, filters])(se)
            u2 = Multiply()([u2, se])
            shortcut = Conv3D(filters, (3, 3, 3), use_bias=False, padding='same')(u1)
            shortcut = InstanceNormalization(axis=axis)(shortcut)
            u2 = layers.add([u2, shortcut])
        u2 = Activation('selu')(u2)
        return u2
Example #11
def Conv3D_Classes(args, classes):
    # shape = (seqlength, imgsize, imgsize, channels)
    input_shape = (args.seqlength, args.imgsize, args.imgsize, 3)

    model = Sequential()
    # first layer
    model.add(Conv3D(32, (3, 3, 3), activation='relu',
                     input_shape=input_shape))
    model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2)))

    # second layer
    model.add(Conv3D(64, (3, 3, 3), activation='relu'))
    model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2)))

    # 3rd layer
    model.add(Conv3D(128, (3, 3, 3), activation='relu'))
    model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2)))
    model.add(BatchNormalization())
    # 4th layer
    #model.add(Conv3D(256, (2,2,2), activation='relu'))
    #model.add(MaxPooling3D(pool_size=(1,2,2), strides=(1,2,2)))
    #model.add(BatchNormalization())

    model.add(GlobalAveragePooling3D())

    model.add(Dense(128))
    model.add(Dropout(args.dropout))
    model.add(Dense(classes, activation='softmax'))

    return model
Example #12
def se_block(input_feature, ratio=8):
    """Contains the implementation of Squeeze-and-Excitation(SE) block.
    As described in https://arxiv.org/abs/1709.01507.
    """

    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    channel = input_feature._keras_shape[channel_axis]

    se_feature = GlobalAveragePooling3D()(input_feature)
    se_feature = Reshape((1, 1, 1, channel))(se_feature)
    assert se_feature._keras_shape[1:] == (1, 1, 1, channel)
    se_feature = Dense(channel // ratio,
                       activation='relu',
                       kernel_initializer='he_normal',
                       use_bias=True,
                       bias_initializer='zeros')(se_feature)
    assert se_feature._keras_shape[1:] == (1, 1, 1, channel // ratio)
    se_feature = Dense(channel,
                       activation='sigmoid',
                       kernel_initializer='he_normal',
                       use_bias=True,
                       bias_initializer='zeros')(se_feature)
    assert se_feature._keras_shape[1:] == (1, 1, 1, channel)
    if K.image_data_format() == 'channels_first':
        se_feature = Permute((4, 1, 2, 3))(se_feature)

    se_feature = multiply([input_feature, se_feature])
    return se_feature
Example #13
def spatial_squeeze_channel_excite_block3D(input, kernel_init, ratio=2):
    ''' Create a channel-wise squeeze-excite block
    Args:
        input: input tensor
        kernel_init: kernel initializer for the two Dense layers
        ratio: reduction ratio for the excitation bottleneck

    Returns: a Keras tensor
    '''
    init = input
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    filters = init._keras_shape[channel_axis]
    se_shape = (1, 1, 1, filters)

    se = GlobalAveragePooling3D()(init)
    se = Reshape(se_shape)(se)
    se = Dense(filters // ratio,
               activation='elu',
               kernel_initializer=kernel_init,
               use_bias=False)(se)
    se = Dense(filters,
               activation='sigmoid',
               kernel_initializer=kernel_init,
               use_bias=False)(se)

    if K.image_data_format() == 'channels_first':
        se = Permute((4, 1, 2, 3))(se)

    x = multiply([init, se])
    return x
Example #14
    def conv3d(layer_input,
               filters,
               axis=-1,
               se_res_block=True,
               se_ratio=16,
               down_sizing=True):
        if down_sizing:
            layer_input = MaxPooling3D(pool_size=(2, 2, 2))(layer_input)
        d = Conv3D(filters, (3, 3, 3), use_bias=False,
                   padding='same')(layer_input)
        d = InstanceNormalization(axis=axis)(d)
        d = LeakyReLU(alpha=0.3)(d)
        d = Conv3D(filters, (3, 3, 3), use_bias=False, padding='same')(d)
        d = InstanceNormalization(axis=axis)(d)
        if se_res_block:
            # Squeeze-and-excitation plus residual shortcut, as in the decoder block
            se = GlobalAveragePooling3D()(d)
            se = Dense(filters // se_ratio, activation='relu')(se)
            se = Dense(filters, activation='sigmoid')(se)
            se = Reshape([1, 1, 1, filters])(se)
            d = Multiply()([d, se])
            shortcut = Conv3D(filters, (3, 3, 3),
                              use_bias=False,
                              padding='same')(layer_input)
            shortcut = InstanceNormalization(axis=axis)(shortcut)
            d = layers.add([d, shortcut])
        d = LeakyReLU(alpha=0.3)(d)
        return d
Example #15
def get_mini_model(width=128, height=128, depth=64):
    """Build a 3D convolutional neural network model."""
    inputs = keras.Input((depth, height, width, 1))
    x = Conv3D(filters=64, kernel_size=3, activation="relu")(inputs)
    x = MaxPooling3D(pool_size=2)(x)
    x = BatchNormalization()(x)

    # x = Conv3D(filters=64, kernel_size=3, activation="relu")(x)
    # x = MaxPooling3D(pool_size=2)(x)
    # x = BatchNormalization()(x)

    x = Conv3D(filters=128, kernel_size=3, activation="relu")(x)
    x = AveragePooling3D(pool_size=2)(x)
    x = BatchNormalization()(x)

    x = Conv3D(filters=256, kernel_size=3, activation="relu")(x)
    x = AveragePooling3D(pool_size=2)(x)
    x = BatchNormalization()(x)

    x = GlobalAveragePooling3D()(x)
    x = Dense(units=512, activation="relu")(x)
    # x = Dropout(0.3)(x)
    # x = Flatten()(x)
    # outputs = Dense(units=4, activation="softmax")(x)

    # Define the model.
    model = keras.Model(inputs, x)
    return model
Example #16
def DenseNet(kernel1, kernel2, kernel3, numlayers, droprate, addD):
    weight_decay = 1E-4
    model_input1 = Input((50, 50, 50, 1), name='image')
    nb_layers = 3
    x = BatchNormalization()(model_input1)
    x = Conv3D(kernel3, (3, 3, 3),
               kernel_initializer="he_uniform",
               name="initial_conv3D",
               use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)
    x = dense_block(x, numlayers, kernel1)
    x = transition(x, kernel2, droprate)
    x = dense_block(x, numlayers, kernel1)
    x = transition(x, kernel2, droprate)
    if addD == 1:
        x = dense_block(x, 1, kernel1)
    x = BatchNormalization(axis=-1,
                           gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)
    x = ReLU()(x)
    x = GlobalAveragePooling3D()(x)
    f1 = Dense(n_intervals,
               kernel_initializer='zeros',
               bias_initializer='zeros')(x)
    out = Activation('sigmoid')(f1)
    model = Model(inputs=[model_input1], outputs=[out])
    return model
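DenseNet depends on a module-level n_intervals that sizes the final sigmoid layer; a hypothetical value for illustration:

n_intervals = 10  # hypothetical; width of the final sigmoid output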
Example #17
def squeeze_excite_block(input_tensor, ratio=8):
    """ Create a channel-wise squeeze-excite block
    Args:
        input_tensor: input Keras tensor
        ratio: number of output filters
    Returns: a Keras tensor
    References
    -   [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    """
    #init = input_tensor
    #channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    filters = input_tensor._keras_shape[-1]  #_tensor_shape(init)[channel_axis]
    se_shape = (1, 1, 1, filters)

    se = GlobalAveragePooling3D()(input_tensor)
    se = Reshape(se_shape)(se)
    se = Dense(filters // ratio,
               activation='relu',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = Dense(filters,
               activation='sigmoid',
               kernel_initializer='he_normal',
               use_bias=False)(se)

    x = multiply([input_tensor, se])
    return x
Пример #18
0
def getCNN(n_classes):
    """
        This is the current working CNN.
        classes is the number of classes (neurons in the final softmax layer) to be processed.
        If finetune==True, only allow the final two levels to be trainable.
    """
    # Neural net (two-channel)
    # leaky_relu replaced with relu. Max pooling replaced with strides in conv layers. 2018-05-18
    inp = Input(shape=(32,32,32,2))

    # First layer:
    conv_0 = Conv3D(32, [4,4,4], strides=2,  activation="relu")(inp) # [16,16,16]

    # Second layer:
    conv_1 = Conv3D(64, [4,4,4], strides=2, activation="relu")(conv_0) # [8,8,8]

    # Third layer:
    conv_2 = Conv3D(128, [2,2,2], activation="relu")(conv_1)

    # Fourth layer:
    conv_3 = Conv3D(256, [2,2,2], activation="relu")(conv_2)

    # Global pooling layer:
    global_pool_0 = GlobalAveragePooling3D()(conv_3)

    # Output layer:
    fc_0 = Dense(n_classes, activation='softmax')(global_pool_0)

    model = Model(inputs=inp, outputs=fc_0)
    return model
Пример #19
0
def _se_block(inputs, filters, se_ratio=16):
    x = GlobalAveragePooling3D()(inputs)
    x = Dense(filters // se_ratio, activation='relu')(x)
    x = Dense(filters, activation='sigmoid')(x)
    x = Reshape([1, 1, 1, filters])(x)
    x = Multiply()([inputs, x])
    return x
Пример #20
0
def squeeze_excitation_block_3D(inputSE, ratio=16):
    '''
    Creates a squeeze-and-excitation block
    :param inputSE: input tensor
    :param ratio: reduction ratio r for the bottleneck formed by the two FC layers
    :return: a Keras tensor
    '''

    if backend.image_data_format() == 'channels_first':
        channels = 1
    else:
        channels = -1

    # number of input filters/channels
    inputSE_shape = backend.int_shape(inputSE)
    numChannels = inputSE_shape[channels]

    #squeeze operation
    output = GlobalAveragePooling3D(
        data_format=backend.image_data_format())(inputSE)

    #excitation operation
    output = Dense(numChannels // ratio,
                   activation='relu',
                   use_bias=True,
                   kernel_initializer='he_normal')(output)
    output = Dense(numChannels,
                   activation='sigmoid',
                   use_bias=True,
                   kernel_initializer='he_normal')(output)

    #scale operation
    output = multiply([inputSE, output])

    return output
Пример #21
0
def SENet_Block(x_input, out_dims, reduction_ratio=4):
    residual_abs = Lambda(abs_backend, name="abs_se" + str(out_dims))(x_input)

    x = Conv3D(out_dims, (1, 1, 1), padding='same', use_bias=False,
               kernel_initializer='he_normal', kernel_regularizer=l2(5e-4))(x_input)

    abs_mean = GlobalAveragePooling3D()(x)

    scales = Reshape((1, 1, 1, out_dims))(abs_mean)
    scales = Conv3D(filters=out_dims // reduction_ratio, kernel_size=1,
                    use_bias=False, kernel_initializer='he_normal', kernel_regularizer=l2(5e-4))(scales)
    scales = Activation('relu')(scales)
    scales = Conv3D(filters=out_dims, kernel_size=1,
                    use_bias=False, kernel_initializer='he_normal', kernel_regularizer=l2(5e-4))(scales)
    scales = Activation('sigmoid')(scales)

    # thres = multiply([abs_mean, scales])
    thres = multiply([scales, x])

    # Soft thresholding: shrink |x| by the learned threshold, clamp at zero, restore the sign
    sub = keras.layers.subtract([residual_abs, thres])
    zeros = keras.layers.subtract([sub, sub])  # zero tensor of matching shape
    n_sub = keras.layers.maximum([sub, zeros])
    residual = keras.layers.multiply([Lambda(sign_backend, name="sign_se" + str(out_dims))(x_input), n_sub])

    return residual
Пример #22
0
def se_ClassNet():
    inputs = Input(shape=(280, 280, 16, 1), name='input1')
    # 256*256*128
    print("input shape:", inputs.shape)  # (?, 140, 140, 16, 64)
    out = Conv3D(64,
                 7,
                 strides=(2, 2, 1),
                 padding='same',
                 kernel_initializer='he_normal',
                 use_bias=False,
                 name='conv1')(inputs)
    print("conv0 shape:", out.shape)  #(?, 140, 140, 16, 64)
    out = BatchNormalization(axis=-1, epsilon=1e-6, name='bn1')(out)
    out = Activation('relu')(out)
    out = MaxPooling3D((3, 3, 3), strides=(2, 2, 1), padding='same')(out)
    print("pooling1 shape:", out.shape)  #(?, 70, 70, 16, 64)

    # stage1=================================================
    out = conv_block(out, [64, 64, 256], name='L1_block1')
    print("conv1 shape:", out.shape)
    out = se_identity_block(out, [64, 64, 256], name='L1_block2')
    out = se_identity_block(out, [64, 64, 256], name='L1_block3')

    # stage2=================================================
    out = conv_block(out, [128, 128, 512], name='L2_block1')
    print("conv2 shape:", out.shape)
    out = se_identity_block(out, [128, 128, 512], name='L2_block2')
    out = se_identity_block(out, [128, 128, 512], name='L2_block3')
    out = se_identity_block(out, [128, 128, 512], name='L2_block4')

    # stage3=================================================
    out = conv_block(out, [256, 256, 1024], name='L3_block1')
    print("conv3 shape:", out.shape)
    out = se_identity_block(out, [256, 256, 1024], name='L3_block2')
    out = se_identity_block(out, [256, 256, 1024], name='L3_block3')
    out = se_identity_block(out, [256, 256, 1024], name='L3_block4')
    out = se_identity_block(out, [256, 256, 1024], name='L3_block5')
    out = se_identity_block(out, [256, 256, 1024], name='L3_block6')

    # stage4=================================================
    out = conv_block(out, [512, 512, 2048], name='L4_block1')
    print("conv4 shape:", out.shape)
    out = se_identity_block(out, [512, 512, 2048], name='L4_block2')
    out = se_identity_block(out, [512, 512, 2048], name='L4_block3')

    out = GlobalAveragePooling3D(data_format='channels_last')(out)
    print("Gpooling shape:", out.shape)
    out_drop = Dropout(rate=0.3)(out)
    out = Dense(1, name='fc1')(out_drop)
    print("out shape:", out.shape)
    output = Activation(activation='sigmoid')(out)

    model = Model(inputs=inputs, outputs=output)
    #mean_squared_logarithmic_error or binary_crossentropy
    model.compile(optimizer=SGD(lr=1e-6, momentum=0.9),
                  loss=EuiLoss,
                  metrics=[y_t, y_pre, Acc])
    return model
Пример #23
0
def fCreateModel_FCN_simple(patchSize,
                            dr_rate=0.0,
                            iPReLU=0,
                            l1_reg=0.0,
                            l2_reg=1e-6):
    # Total params: 1,223,831
    # Replace the dense layer with a convolutional layer with filters=2 for the two classes
    Strides = fgetStrides()
    kernelnumber = fgetKernelNumber()
    inp = Input(shape=(1, int(patchSize[0]), int(patchSize[1]),
                       int(patchSize[2])))

    after_Conv_1 = fCreateVNet_Block(inp,
                                     kernelnumber[0],
                                     type=fgetLayerNumConv(),
                                     l2_reg=l2_reg)
    after_DownConv_1 = fCreateVNet_DownConv_Block(after_Conv_1,
                                                  after_Conv_1._keras_shape[1],
                                                  Strides[0],
                                                  iPReLU=iPReLU,
                                                  dr_rate=dr_rate,
                                                  l2_reg=l2_reg)

    after_Conv_2 = fCreateVNet_Block(after_DownConv_1,
                                     kernelnumber[1],
                                     type=fgetLayerNumConv(),
                                     l2_reg=l2_reg)
    after_DownConv_2 = fCreateVNet_DownConv_Block(after_Conv_2,
                                                  after_Conv_2._keras_shape[1],
                                                  Strides[1],
                                                  iPReLU=iPReLU,
                                                  dr_rate=dr_rate,
                                                  l2_reg=l2_reg)

    after_Conv_3 = fCreateVNet_Block(after_DownConv_2,
                                     kernelnumber[2],
                                     type=fgetLayerNumConv(),
                                     l2_reg=l2_reg)
    after_DownConv_3 = fCreateVNet_DownConv_Block(after_Conv_3,
                                                  after_Conv_3._keras_shape[1],
                                                  Strides[2],
                                                  iPReLU=iPReLU,
                                                  dr_rate=dr_rate,
                                                  l2_reg=l2_reg)

    dropout_out = Dropout(dr_rate)(after_DownConv_3)
    fclayer = Conv3D(
        2,
        kernel_size=(1, 1, 1),
        kernel_initializer='he_normal',
        weights=None,
        padding='valid',
        strides=(1, 1, 1),
        kernel_regularizer=l1_l2(l1_reg, l2_reg),
    )(dropout_out)
    fclayer = GlobalAveragePooling3D()(fclayer)
    outp = Activation('softmax')(fclayer)
    cnn_spp = Model(inputs=inp, outputs=outp)
    return cnn_spp
Пример #24
0
def resnext_or(classes=2):
    inputs = Input(shape=(280, 280, 16, 1), name='input1')
    # 256*256*128
    print("input shape:", inputs.shape)  # (?, 140, 140, 16, 64)
    out = Conv3D(64, 7, strides=(2, 2, 1), padding='same', kernel_initializer='he_normal', use_bias=False, name='conv1')(inputs)
    print("conv0 shape:", out.shape)#(?, 140, 140, 16, 64)
    out = BatchNormalization(axis = -1, epsilon=1e-6, name='bn1')(out)
    out = Activation('relu')(out)
    out = MaxPooling3D((3, 3, 3), strides=(2, 2, 1), padding='same')(out)
    print("pooling1 shape:", out.shape)#(?, 70, 70, 16, 64)

    out = conv_block_or(out, [64, 64, 256], name='L1_block1')  # must be of the form [n, n, X*n] (grouped convolution): X can be any value and gives X*n output channels; n should be a multiple of 32, since the grouped convolution uses 32 groups (this is configurable)
    print("conv1 shape:", out.shape)
    out = identity_block(out, [64, 64, 256], name='L1_block2')  # must be of the form [n, n, a], where a equals the output channels of the preceding conv_block or identity_block; an identity_block keeps input and output channels equal
    out = identity_block(out, [64, 64, 256], name='L1_block3')

    out = conv_block_or(out, [128, 128, 512], name='L2_block1')
    print("conv2 shape:", out.shape)
    out = identity_block(out, [128, 128, 512], name='L2_block2')
    out = identity_block(out, [128, 128, 512], name='L2_block3')
    out = identity_block(out, [128, 128, 512], name='L2_block4')

    out = conv_block_or(out, [256, 256, 1024], name='L3_block1')
    print("conv3 shape:", out.shape)
    out = identity_block(out, [256, 256, 1024], name='L3_block2')
    out = identity_block(out, [256, 256, 1024], name='L3_block3')
    out = identity_block(out, [256, 256, 1024], name='L3_block4')
    out = identity_block(out, [256, 256, 1024], name='L3_block5')
    out = identity_block(out, [256, 256, 1024], name='L3_block6')

    out = conv_block_or(out, [512, 512, 2048], name='L4_block1')
    print("conv4 shape:", out.shape)
    out = identity_block(out, [512, 512, 2048], name='L4_block2')
    out = identity_block(out, [512, 512, 2048], name='L4_block3')

    out = GlobalAveragePooling3D(data_format='channels_last')(out)
    print("Gpooling shape:", out.shape)
    out_drop = Dropout(rate=0.3)(out)

    if classes == 1:
        output = Dense(classes, activation='sigmoid', use_bias=use_bias_flag, name='fc1')(out_drop)
        print("predictions1 shape:", output.shape, 'activation: sigmoid')
    else:
        output = Dense(classes, activation='softmax', use_bias=use_bias_flag, name='fc1')(out_drop)
        print("predictions2 shape:", output.shape, 'activation: softmax')

    model = Model(inputs=inputs, outputs=output)
    # mean_squared_logarithmic_error or binary_crossentropy
    # model.compile(optimizer=SGD(lr=1e-6, momentum=0.9), loss=EuiLoss, metrics=[y_t, y_pre, Acc])
    return model
Пример #25
0
def DenseNet3D(input_shape, growth_rate=32, block_config=(6, 12, 24, 16),
               num_init_features=64, bn_size=4, drop_rate=0, num_classes=5):
    r"""Densenet-BC model class, based on
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
    Args:
        growth_rate (int) - how many filters to add each layer (`k` in paper)
        block_config (list of 4 ints) - how many layers in each pooling block
        num_init_features (int) - the number of filters to learn in the first convolution layer
        bn_size (int) - multiplicative factor for number of bottle neck layers
          (i.e. bn_size * k features in the bottleneck layer)
        drop_rate (float) - dropout rate after each dense layer
        num_classes (int) - number of classification classes
    """
    #-----------------------------------------------------------------
    inp_2d = (Input(shape=(224,224,3), name='2d_input'))
    batch_densenet = densenet.DenseNet169(include_top=False, input_shape=(224,224,3), input_tensor=inp_2d, weights='imagenet')
    
    for layer in batch_densenet.layers:
        layer.trainable = False

    # Configure the 2D CNN to take batches of pictures
    inp_2d_batch = (Input(shape=input_shape, name='2d_input_batch'))
    batch_densenet = TimeDistributed(batch_densenet)(inp_2d_batch)
    batch_densenet = Model(inputs=inp_2d_batch, outputs=batch_densenet)
    #-----------------------------------------------------------------

    # inp_3d = (Input(shape=input_shape, name='3d_input'))
    t3d = T3D169(include_top=False, input_shape=input_shape)

    #--------------from 2d densenet model-----------------
    x = GlobalAveragePooling3D(name='avg_pool_t3d')(t3d.output)
    y = GlobalAveragePooling3D(name='avg_pool_densnet3d')(batch_densenet.output)

    #-----------------------------------------------------
    x = keras.layers.concatenate([x,y])
    x = Dropout(0.65)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(128, activation='relu')(x)
    x = Dropout(0.35)(x)
    out = Dense(num_classes, activation='softmax')(x)

    model = Model(inputs=[inp_2d_batch, t3d.input], outputs=[out])
    # model.summary()

    return model
def Fast_body(x, layers, block):
    fast_inplanes = 8
    lateral = []
    x = Conv_BN_ReLU(8, kernel_size=(5, 7, 7), strides=(1, 2, 2))(x)
    x = MaxPool3D(pool_size=(1, 3, 3), strides=(1, 2, 2), padding='same')(x)
    lateral_p1 = Conv3D(8 * 2,
                        kernel_size=(5, 1, 1),
                        strides=(8, 1, 1),
                        padding='same',
                        use_bias=False)(x)
    lateral.append(lateral_p1)
    x, fast_inplanes = make_layer_fast(x,
                                       block,
                                       8,
                                       layers[0],
                                       head_conv=3,
                                       fast_inplanes=fast_inplanes)
    lateral_res2 = Conv3D(32 * 2,
                          kernel_size=(5, 1, 1),
                          strides=(8, 1, 1),
                          padding='same',
                          use_bias=False)(x)
    lateral.append(lateral_res2)
    x, fast_inplanes = make_layer_fast(x,
                                       block,
                                       16,
                                       layers[1],
                                       stride=2,
                                       head_conv=3,
                                       fast_inplanes=fast_inplanes)
    lateral_res3 = Conv3D(64 * 2,
                          kernel_size=(5, 1, 1),
                          strides=(8, 1, 1),
                          padding='same',
                          use_bias=False)(x)
    lateral.append(lateral_res3)
    x, fast_inplanes = make_layer_fast(x,
                                       block,
                                       32,
                                       layers[2],
                                       stride=2,
                                       head_conv=3,
                                       fast_inplanes=fast_inplanes)
    lateral_res4 = Conv3D(128 * 2,
                          kernel_size=(5, 1, 1),
                          strides=(8, 1, 1),
                          padding='same',
                          use_bias=False)(x)
    lateral.append(lateral_res4)
    x, fast_inplanes = make_layer_fast(x,
                                       block,
                                       64,
                                       layers[3],
                                       stride=2,
                                       head_conv=3,
                                       fast_inplanes=fast_inplanes)
    x = GlobalAveragePooling3D()(x)
    return x, lateral
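Fast_body relies on Conv_BN_ReLU and make_layer_fast helpers defined elsewhere (a SlowFast-style fast pathway). A hedged usage sketch with a hypothetical Bottleneck block and ResNet-50 stage depths:

from keras.layers import Input

# Hypothetical usage (Bottleneck and the helpers above assumed in scope)
clip = Input(shape=(64, 224, 224, 3))      # (frames, H, W, RGB)
fast_out, laterals = Fast_body(clip, [3, 4, 6, 3], Bottleneck)
# laterals would be fused into the slow pathway of a SlowFast network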
def BILSTM_CNN_3D(model_base, classes=7, fc_finals=[512, 512], fc_dropout=[0.1, 0.0, 0.0],
                  lstm_cell=1024, lstm_layers=1, lstm_dropout=0.2, lstm_recurrent_dropout=0.2):

    model = Sequential()
    model.add(TimeDistributed(model_base, input_shape=model_base.input_shape))  # (batch_size, frames, features)
    for i in range(lstm_layers):
        model.add(Bidirectional(LSTM(lstm_cell, return_sequences=True, dropout=lstm_dropout, recurrent_dropout=lstm_recurrent_dropout)))
    model.add(TimeDistributed(Reshape((32, 32, 1))))  # (batch_size, frames, 32, 32, 1)

    # 1st layer group
    model.add(Convolution3D(64, (3, 3, 3), padding='same', name='conv1_1', strides=(1, 1, 1)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution3D(64, (3, 3, 3), padding='same', name='conv1_2', strides=(1, 1, 1)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool1'))
    # 2nd layer group
    model.add(Convolution3D(128, (3, 3, 3), padding='same', name='conv2_1', strides=(1, 1, 1)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution3D(128, (3, 3, 3), padding='same', name='conv2_2', strides=(1, 1, 1)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool2'))
    # 3rd layer group
    model.add(Convolution3D(256, (3, 3, 3), padding='same', name='conv3_1', strides=(1, 1, 1)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution3D(256, (3, 3, 3), padding='same', name='conv3_2', strides=(1, 1, 1)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution3D(256, (3, 3, 3), padding='same', name='conv3_3', strides=(1, 1, 1)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool3'))
    # 4th layer group
    model.add(Convolution3D(512, (3, 3, 3), padding='same', name='conv4_1', strides=(1, 1, 1)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution3D(512, (3, 3, 3), padding='same', name='conv4_2', strides=(1, 1, 1)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Convolution3D(512, (3, 3, 3), padding='same', name='conv4_3', strides=(1, 1, 1)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool4'))
    model.add(GlobalAveragePooling3D())
    # FC layers group
    if fc_dropout[0] > 0: model.add(Dropout(fc_dropout[0]))
    if fc_finals[0] > 0: model.add(Dense(fc_finals[0], activation='relu', name='fc1'))
    if fc_dropout[1] > 0: model.add(Dropout(fc_dropout[1]))
    if fc_finals[1] > 0: model.add(Dense(fc_finals[1], activation='relu', name='fc2'))
    if fc_dropout[2] > 0: model.add(Dropout(fc_dropout[2]))
    model.add(Dense(classes, activation='softmax', name='predictions'))
    return model
# CNN_3D_BATCHNORM_01
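BILSTM_CNN_3D wraps an arbitrary per-frame feature extractor. A hedged usage sketch with a hypothetical model_base: note Bidirectional doubles lstm_cell, so 2 * lstm_cell must equal 32 * 32 = 1024 for the Reshape((32, 32, 1)) above to be valid, i.e. lstm_cell=512 (the default of 1024 would not reshape cleanly).

# Hypothetical usage: model_base maps one frame to a feature vector
model = BILSTM_CNN_3D(model_base, classes=7, lstm_cell=512)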
Example #28
def phinet(n_classes, model_path, num_channels=1, learning_rate=1e-3, num_gpus=1, verbose=0):
    inputs = Input(shape=(None,None,None,num_channels))

    x = Conv3D(64, (3,3,3), strides=(2,2,2), padding='same')(inputs)
    x = MaxPooling3D(pool_size=(3,3,3), strides=(1,1,1), padding='same')(x)

    x = Conv3D(64, (3,3,3), strides=(2,2,2), padding='same')(x)
    x = BatchNormalization()(x)
    y = Activation('relu')(x)
    x = Conv3D(64, (3,3,3), strides=(1,1,1), padding='same')(y)
    x = BatchNormalization()(x)
    x = add([x, y])
    x = Activation('relu')(x)

    # this block will pool a handful of times to get the "big picture" 
    y = MaxPooling3D(pool_size=(5,5,5), strides=(2,2,2), padding='same')(inputs)
    y = AveragePooling3D(pool_size=(3,3,3), strides=(2,2,2), padding='same')(y)
    y = Conv3D(64, (3,3,3), strides=(1,1,1), padding='same')(y)

    # this layer will preserve original signal
    z = Conv3D(64, (3,3,3), strides=(2,2,2), padding='same')(inputs)
    z = Conv3D(64, (3,3,3), strides=(2,2,2), padding='same')(z)
    z = Conv3D(64, (3,3,3), strides=(1,1,1), padding='same')(z)

    x = Concatenate(axis=4)([x, y, z])

    # global avg pooling before FC
    x = GlobalAveragePooling3D()(x)
    x = Dense(n_classes)(x)

    pred = Activation('softmax')(x)
    
    model = Model(inputs=inputs, outputs=pred)

    model.compile(optimizer=Adam(lr=learning_rate),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    if verbose:
        print(model.summary())

    # save json before checking if multi-gpu
    json_string = model.to_json()
    with open(model_path, 'w') as f:
        json.dump(json_string, f)

    # recompile if multi-gpu model
    if num_gpus > 1:
        model = ModelMGPU(model, num_gpus)
        model.compile(optimizer=Adam(lr=learning_rate),
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

    return model
def squeeze_excitation_layer(x, out_dim, ratio=4, activation=LeakyReLU):

    squeeze = GlobalAveragePooling3D()(x)
    excitation = Dense(units=out_dim // ratio)(squeeze)
    excitation = activation()(excitation)
    excitation = Dense(units=out_dim)(excitation)
    excitation = Activation('sigmoid')(excitation)
    # broadcast over the three spatial axes (channels_last)
    excitation = Reshape((1, 1, 1, out_dim))(excitation)
    scale = multiply([x, excitation])
    return scale
Example #30
    def _squeeze(self, inputs):
        input_channels = int(inputs.shape[-1])

        x = GlobalAveragePooling3D()(inputs)
        x = Dense(input_channels, activation='relu')(x)
        x = Dense(input_channels, activation='hard_sigmoid')(x)
        x = Reshape((1, 1, 1, input_channels))(x)
        x = Multiply()([inputs, x])

        return x