def get_Inception_classifier():
    """Build a 4-GPU Inception-style 3D classifier.

    Returns a tuple ``(parallel_model, single_model)``: the compiled
    multi-GPU wrapper plus the underlying single-device model (the latter
    is typically what gets its weights saved/loaded).
    """
    inputs = Input((CLASSIFY_INPUT_WIDTH, CLASSIFY_INPUT_HEIGHT, CLASSIFY_INPUT_DEPTH, CLASSIFY_INPUT_CHANNEL))
    print('inputs')
    print(inputs.get_shape())

    # Stem, then a stack of inception blocks with periodic reductions.
    x = inception_base(inputs)
    for block_idx in range(INCEPTION_BLOCKS):
        x = inception_block(x, filters=INCEPTION_KEEP_FILTERS)
        # Downsample every INCEPTION_REDUCTION_STEPS blocks, but never
        # right after the final block.
        at_reduction = (block_idx + 1) % INCEPTION_REDUCTION_STEPS == 0
        if at_reduction and block_idx != INCEPTION_BLOCKS - 1:
            x = reduction_block(x, filters=INCEPTION_KEEP_FILTERS // 2)

    print('top')
    x = GlobalMaxPooling3D()(x)
    print(x.get_shape())
    x = Dropout(INCEPTION_DROPOUT)(x)
    x = Dense(2, activation='softmax')(x)
    print(x.get_shape())

    model_s = Model(inputs=inputs, outputs=x)
    model = multi_gpu_model(model_s, gpus=4)
    model.compile(optimizer=Adam(lr=TRAIN_CLASSIFY_LEARNING_RATE), loss='binary_crossentropy', metrics=['accuracy'])

    return model, model_s
Пример #2
0
def get_model(n_ch=32):
    """Build a 4-stage 3D conv classifier over (96, 160, 160, 1) volumes.

    :param n_ch: filter count of the first stage; later stages use
        2x, 4x and 8x this count.
    :return: an uncompiled Keras ``Model`` ending in a 600-way softmax.
    """
    input = Input(shape=(96, 160, 160, 1))
    C1_0 = Conv3D(n_ch, (3, 3, 3), padding="valid")(input)
    C1_0 = BatchNormalization()(C1_0)
    C1_0 = Activation('relu')(C1_0)
    C1_1 = Conv3D(n_ch, (3, 3, 3), padding="valid", activation='relu')(C1_0)
    # BUGFIX(dead code): the original re-applied BatchNormalization and
    # Activation to C1_0 *after* C1_1 was computed; those tensors were
    # never consumed, so the layers never entered the model graph.
    # Removing them leaves the built model unchanged.
    MP1 = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2))(C1_1)
    D1 = Dropout(0.25)(MP1)

    C2_0 = Conv3D(2*n_ch, (3, 3, 3), padding="valid", activation='relu')(D1)
    C2_1 = Conv3D(2*n_ch, (3, 3, 3), padding="valid", activation='relu')(C2_0)
    MP2 = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2))(C2_1)
    D2 = Dropout(0.25)(MP2)

    C3_0 = Conv3D(4 * n_ch, (3, 3, 3), padding="valid", activation='relu')(D2)
    C3_1 = Conv3D(4 * n_ch, (3, 3, 3), padding="valid", activation='relu')(C3_0)
    MP3 = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2))(C3_1)
    D3 = Dropout(0.25)(MP3)

    C4_0 = Conv3D(8 * n_ch, (3, 3, 3), padding="valid", activation='relu')(D3)
    C4_1 = Conv3D(8 * n_ch, (3, 3, 3), padding="valid", activation='relu')(C4_0)
    MP4 = GlobalMaxPooling3D()(C4_1)
    D4 = Dropout(0.25)(MP4)

    Den2 = Dense(600, activation='softmax')(D4)
    model = Model(inputs=input, outputs=Den2)
    return model
Пример #3
0
def get_ResNet_classifier():
    """Build and compile a 3D ResNet-style binary classifier."""
    inputs = Input((CLASSIFY_INPUT_WIDTH, CLASSIFY_INPUT_HEIGHT,
                    CLASSIFY_INPUT_DEPTH, CLASSIFY_INPUT_CHANNEL))

    # Stem convolution.
    x = conv_bn_relu(inputs, RESNET_INITIAL_FILTERS)
    print('base')
    print(x.get_shape())

    # Bottleneck stack; every RESNET_SHRINKAGE_STEPS-th block shrinks.
    for block_idx in range(RESNET_BLOCKS):
        shrink = block_idx % RESNET_SHRINKAGE_STEPS == 0
        x = bottleneck(x, shrinkage=shrink)

    print('top')
    x = GlobalMaxPooling3D()(x)
    print(x.get_shape())

    x = Dense(2, activation='softmax')(x)
    print(x.get_shape())

    classifier = Model(inputs=inputs, outputs=x)
    classifier.compile(optimizer=Adam(lr=TRAIN_CLASSIFY_LEARNING_RATE),
                       loss='binary_crossentropy',
                       metrics=['accuracy'])

    return classifier
Пример #4
0
def attention_block(input, iter, depth):
    """Channel-attention gate: squeeze `input` to a per-channel descriptor,
    pass it through a two-conv 1x1x1 bottleneck with a sigmoid, tile the
    gate back up to the feature-map size, and multiply it onto the input.

    :param input: 5D channels-first tensor, (batch, depth, d1, d2, d3) —
        assumed from data_format='channels_first' below.
    :param iter: tiling factor along axes 3 and 4 — presumably the spatial
        extent of the feature map; TODO confirm against callers.
    :param depth: channel count of `input`.
    :return: `input` gated element-wise by the broadcast attention map.
    """
    # Global max-pool -> (batch, depth), reshaped to (batch, depth, 1, 1, 1).
    global_pool = GlobalMaxPooling3D(data_format='channels_first')(input)
    global_pool1 = Reshape((depth, 1, 1, 1))(global_pool)
    # Two 1x1x1 convs act as a per-channel MLP (no reduction ratio).
    conv_1x1 = Conv3D(depth, (1, 1, 1),
                      padding='same',
                      data_format='channels_first')(global_pool1)
    relu_out = Activation('relu')(conv_1x1)
    conv_2x1 = Conv3D(depth, (1, 1, 1),
                      strides=(1, 1, 1),
                      padding='same',
                      data_format='channels_first')(relu_out)
    sigmoid_out = Activation('sigmoid')(conv_2x1)
    concat1 = sigmoid_out
    #print("***********1")
    #print(concat1.shape)
    # Tile the (..., 1, 1, 1) gate by repeated concatenation: 4x along
    # axis 2, then `iter`x along axes 3 and 4.
    # NOTE(review): the 4 is hard-coded while the other axes use `iter`;
    # this is only correct if axis 2 always has size 4 — confirm.
    for i in range(4 - 1):
        concat1 = concatenate([concat1, sigmoid_out], axis=2)
    concat2 = concat1
    for j in range(iter - 1):
        concat2 = concatenate([concat2, concat1], axis=3)
    concat3 = concat2
    for k in range(iter - 1):
        concat3 = concatenate([concat3, concat2], axis=4)
    #print("************2")
    #print(concat3.shape)
    # Element-wise gating of the input features.
    out = Multiply()([input, concat3])
    return out
Пример #5
0
def objdet_model_fn(mc_input_shape):
    """ Returns an instance of the 1x1 simulated tile MC """
    mc_input = Input(shape=mc_input_shape, name="objdet/input")

    # Three stacked 1x1 convolutions; the last one is linear.
    conv_specs = (
        (128, "relu", "objdet/conv1"),
        (64, "relu", "objdet/conv2"),
        (64, None, "objdet/conv3"),
    )
    features = mc_input
    for n_filters, act, layer_name in conv_specs:
        features = Conv2D(n_filters,
                          1,
                          strides=(1, 1),
                          padding="same",
                          activation=act,
                          name=layer_name)(features)

    # Add a trailing singleton axis so the 3D global pool reduces over
    # (h, w, channels) at once.
    cur_shape = keras.backend.int_shape(features)
    features = Reshape([*cur_shape[1:], 1], name="objdet/reshape")(features)
    features = GlobalMaxPooling3D(data_format="channels_last",
                                  name="objdet/maxpool")(features)
    mc_output = Activation("sigmoid")(features)
    return Model(inputs=mc_input, outputs=mc_output, name="objdet")
Пример #6
0
def model(input_shape=(103, 50, 50, 1), classes=9):
    """Build the 3D-SRNet classifier: feature extraction backbone,
    global max-pool, and a softmax head over `classes` outputs."""
    X_input = Input(input_shape)
    print(X_input.shape)

    features = feature_extraction(X_input)
    pooled = GlobalMaxPooling3D()(features)
    print(pooled.shape)

    logits = Dense(classes,
                   input_dim=256,
                   activation='softmax',
                   name='fc' + str(classes),
                   kernel_initializer=glorot_uniform(seed=0))(pooled)

    return Model(inputs=X_input, outputs=logits, name="3D-SRNet")
Пример #7
0
def _vgg_stage(x, filters, n_convs):
    """One VGG stage: `n_convs` 3x3x3 same-padded ReLU convs + 2x max-pool."""
    for _ in range(n_convs):
        x = Conv3D(filters, (3, 3, 3), padding='same', activation='relu')(x)
    return MaxPooling3D(pool_size=(2, 2, 2))(x)


def get_full_VGG_classifier():
    """Build and compile a VGG-16-style 3D binary classifier.

    Four conv stages (32/64/128/256 filters, the first two with 2 convs,
    the rest with 3), each optionally followed by batch norm, then a
    final 512-filter stage collapsed with global max-pooling and a small
    dense head with a 2-way softmax.
    """
    inputs = Input((CLASSIFY_INPUT_WIDTH, CLASSIFY_INPUT_HEIGHT,
                    CLASSIFY_INPUT_DEPTH, CLASSIFY_INPUT_CHANNEL))
    x = inputs

    for filters, n_convs in ((32, 2), (64, 2), (128, 3), (256, 3)):
        x = _vgg_stage(x, filters, n_convs)
        if TRAIN_CLASSIFY_USE_BN:
            x = BatchNormalization()(x)

    # Final stage is pooled globally instead of with MaxPooling3D.
    for _ in range(3):
        x = Conv3D(512, (3, 3, 3), padding='same', activation='relu')(x)
    x = GlobalMaxPooling3D()(x)

    x = Dense(32, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(2, activation='softmax')(x)

    model = Model(inputs=inputs, outputs=x)
    model.compile(optimizer=Adam(lr=TRAIN_CLASSIFY_LEARNING_RATE),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    return model
def squeezenet(input_dim, num_classes):
    """3D SqueezeNet: strided stem conv, stacked fire modules with
    interleaved max-pools, global max-pool, and a softmax head.

    :param input_dim: input volume shape (excluding the batch axis).
    :param num_classes: number of softmax output classes.
    :return: uncompiled Keras Model named 'squeezenet3d'.
    """
    img_input = Input(shape=input_dim)
    x = Convolution3D(
        64,
        kernel_size=(3, 3, 3),
        strides=(2, 2, 2),
        padding="same",
        # kernel_regularizer=l2(l2_lambda),
        # kernel_initializer='he_uniform',
        activation="relu",
        name='sqconv1')(img_input)
    x = MaxPooling3D(pool_size=(3, 3, 3),
                     strides=(2, 2, 2),
                     name='maxpool1',
                     padding="valid")(x)

    x = firemodule(x, (16, 64, 64), None)
    x = firemodule(x, (16, 64, 64), None)

    x = MaxPooling3D(pool_size=(3, 3, 3),
                     strides=(2, 2, 2),
                     name='maxpool3',
                     padding="valid")(x)
    x = firemodule(x, (32, 128, 128), None)
    x = firemodule(x, (32, 128, 128), None)
    x = MaxPooling3D(pool_size=(3, 3, 3),
                     strides=(2, 2, 2),
                     name='maxpool5',
                     padding="valid")(x)
    x = firemodule(x, (48, 192, 192), None)
    x = firemodule(x, (48, 192, 192), None)
    x = firemodule(x, (64, 256, 256), None)
    x = firemodule(x, (64, 256, 256), None)
    # Dropout after the last Fire Module
    # x = Dropout(0.2)(x)
    # x = BatchNormalization()(x)

    x = GlobalMaxPooling3D(name="maxpool10")(x)
    # BUGFIX: `init='normal'` is the Keras 1 argument spelling and raises
    # TypeError under Keras 2, which the rest of this file targets
    # (cf. kernel_initializer usage elsewhere). 'random_normal' is the
    # Keras 2 name of the old 'normal' initializer.
    x = Dense(
        num_classes,
        kernel_initializer='random_normal',
    )(x)
    x = Activation('softmax')(x)

    model = Model(img_input, x, name="squeezenet3d")

    return model
Пример #9
0
    def cnn_3d(self):
        """Build and compile a 3D residual CNN with `self.depth` res-blocks.

        Each res-block doubles `self.num_filters`; the head is a global
        max-pool, a 6-unit dense layer, dropout, and a softmax over
        `self.num_class` classes. Also writes an architecture diagram to
        CNN3D.png as a side effect.
        """
        # Hyper-parameters consumed by BuildNetwork.res_block_3d.
        self.kernel_size = 3
        self.activation = 'relu'
        self.stride = 1
        self.num_conv_layers = 2
        self.curr_resblock = []
        inputs = Input(shape=(None, None, None, 1))

        for block_idx in range(self.depth):
            self.curr_resblock = block_idx
            print('Creating ResBlock: ', block_idx)
            if block_idx == 0:
                x = BuildNetwork.res_block_3d(self, inputs)
            else:
                x = BuildNetwork.res_block_3d(self, x)
            # Each level works with twice as many filters as the previous.
            self.num_filters *= 2

        pooled = GlobalMaxPooling3D()(x)
        hidden = Dense(
            6,
            activation=self.activation,
            kernel_initializer=keras.initializers.he_normal(seed=7))(pooled)
        dropped = Dropout(0.5)(hidden)
        outputs = Dense(self.num_class, activation='softmax')(dropped)

        cnn3d = Model(inputs=inputs, outputs=outputs)
        optimizer = Adam(lr=self.lr,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=1e-6,
                         amsgrad=True)
        cnn3d.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['accuracy'])
        plot_model(cnn3d, to_file='CNN3D.png')
        return cnn3d
Пример #10
0
def squeeze_excite_block(input, ratio=8):
    """3D squeeze-and-excitation gate: global max-pool to a channel
    descriptor, two dense layers (reduce by `ratio`, restore with sigmoid),
    then channel-wise multiply back onto `input`.

    :param input: 5D feature tensor.
    :param ratio: bottleneck reduction ratio of the excitation MLP.
    """
    init = input
    channel_axis = 1 if K.backend.image_data_format(
    ) == "channels_first" else -1
    filters = init._keras_shape[channel_axis]
    # BUGFIX: the original (1, 1, filters) shape was copied from the 2D SE
    # block; for the 5D tensors fed through GlobalMaxPooling3D the gate
    # must be rank 4 so it broadcasts over all three spatial dims.
    se_shape = (1, 1, 1, filters)
    se = GlobalMaxPooling3D()(init)
    se = Reshape(se_shape)(se)
    se = Dense(filters // ratio,
               activation='relu',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = Dense(filters,
               activation='sigmoid',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    if K.backend.image_data_format() == 'channels_first':
        # BUGFIX: Permute((3, 1, 2)) was the 2D variant; (4, 1, 2, 3)
        # moves channels from last to first across the four non-batch dims.
        se = Permute((4, 1, 2, 3))(se)
    x = multiply([init, se])
    return x
Пример #11
0
    def get_model(self, learning_rate):
        """Build and compile a compact VGG-style 3D classifier.

        :param learning_rate: RMSprop learning rate.
        :return: compiled Keras Model with a CLASSIFY_OUTPUT_CHANNEL-way
            softmax output.
        """
        inputs = Input((CLASSIFY_INPUT_WIDTH, CLASSIFY_INPUT_HEIGHT,
                        CLASSIFY_INPUT_DEPTH, CLASSIFY_INPUT_CHANNEL))

        # Four conv -> max-pool -> batch-norm stages of doubling width.
        x = inputs
        for n_filters in (32, 64, 128, 256):
            x = Conv3D(n_filters, (3, 3, 3), padding='same',
                       activation='relu')(x)
            x = MaxPooling3D(pool_size=(2, 2, 2))(x)
            x = BatchNormalization()(x)

        # Widest conv, then collapse all spatial dims.
        x = Conv3D(512, (3, 3, 3), padding='same', activation='relu')(x)
        x = GlobalMaxPooling3D()(x)

        x = Dense(32, activation='relu')(x)
        x = Dense(CLASSIFY_OUTPUT_CHANNEL, activation='softmax')(x)

        model = Model(inputs=inputs, outputs=x)
        model.compile(optimizer=RMSprop(lr=learning_rate),
                      loss='categorical_crossentropy',
                      metrics=[categorical_accuracy])

        return model
Пример #12
0
def channel_att(input_feature, ratio=8):
    """CBAM-style 3D channel attention.

    A shared two-layer MLP is applied to both the global-average- and
    global-max-pooled channel descriptors; their sum is squashed with a
    sigmoid and multiplied back onto `input_feature`.

    :param input_feature: 5D feature tensor.
    :param ratio: bottleneck reduction ratio of the shared MLP.
    """
    print(K.image_data_format())
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    channel = input_feature._keras_shape[channel_axis]

    # MLP weights are shared between the avg and max branches.
    shared_layer_one = Dense(channel // ratio,
                             activation='relu',
                             kernel_initializer='he_normal',
                             use_bias=True,
                             bias_initializer='zeros')
    shared_layer_two = Dense(channel,
                             kernel_initializer='he_normal',
                             use_bias=True,
                             bias_initializer='zeros')

    avg_pool = GlobalAveragePooling3D()(input_feature)
    avg_pool = Reshape((1, 1, 1, channel))(avg_pool)
    assert avg_pool._keras_shape[1:] == (1, 1, 1, channel)
    avg_pool = shared_layer_one(avg_pool)
    assert avg_pool._keras_shape[1:] == (1, 1, 1, channel // ratio)
    avg_pool = shared_layer_two(avg_pool)
    assert avg_pool._keras_shape[1:] == (1, 1, 1, channel)

    max_pool = GlobalMaxPooling3D()(input_feature)
    max_pool = Reshape((1, 1, 1, channel))(max_pool)
    assert max_pool._keras_shape[1:] == (1, 1, 1, channel)
    max_pool = shared_layer_one(max_pool)
    assert max_pool._keras_shape[1:] == (1, 1, 1, channel // ratio)
    max_pool = shared_layer_two(max_pool)
    assert max_pool._keras_shape[1:] == (1, 1, 1, channel)

    cbam_feature = Add()([avg_pool, max_pool])
    cbam_feature = Activation('sigmoid')(cbam_feature)

    if K.image_data_format() == "channels_first":
        # BUGFIX: Permute((3, 1, 2)) was copied from the 2D CBAM code and
        # only permutes 3 of the 4 non-batch dims of this 5D gate;
        # (4, 1, 2, 3) moves channels from last to first position.
        cbam_feature = Permute((4, 1, 2, 3))(cbam_feature)

    return multiply([input_feature, cbam_feature])
Пример #13
0
def resnet3d_model(input_shape=(dimz, dimx, dimy, channelNum),
                   num_outputs=4,
                   n_base_filters=16,
                   depth=5,
                   dropout_rate=0.3,
                   optimizer=Adam,
                   initial_learning_rate=5e-4,
                   loss_function=weighted_dice_coefficient_loss,
                   kernel_reg_factor=1e-4,
                   ifbase=False):
    """Residual 3D encoder with a global-max-pooled classification head.

    :param input_shape: input volume shape, (z, x, y, channels).
    :param num_outputs: output units; softmax head when > 1, sigmoid when 1.
    :param n_base_filters: filters at level 0; doubled at each level.
    :param depth: number of downsampling levels.
    :param dropout_rate: dropout inside each context module.
    :param optimizer: optimizer *class* (not instance), called at compile time.
    :param initial_learning_rate: learning rate passed to the optimizer.
    :param loss_function: loss used at compile time.
    :param kernel_reg_factor: L2 regularization factor on the dense head.
    :param ifbase: if True, return only the uncompiled pooled backbone.
    :return: a Keras Model (compiled unless ifbase is True).
    """
    inputs = Input(input_shape)

    current_layer = inputs
    level_output_layers = list()
    level_filters = list()

    for level_number in range(depth):
        # Filter count doubles with each level.
        n_level_filters = (2**level_number) * n_base_filters
        level_filters.append(n_level_filters)

        # First level: large-kernel strided conv plus max-pool (4x total
        # downsampling); deeper levels use a strided 3x3x3 conv only.
        if current_layer is inputs:
            in_conv = create_convolution_block(current_layer,
                                               n_level_filters,
                                               kernel=(5, 5, 5),
                                               strides=(2, 2, 2))
            # in_conv = create_convolution_block(in_conv, n_level_filters, kernel=(5, 5, 5), strides=(2, 2, 2))
            in_conv = MaxPooling3D(pool_size=(3, 3, 3),
                                   strides=(2, 2, 2),
                                   padding="same")(in_conv)
        else:
            in_conv = create_convolution_block(current_layer,
                                               n_level_filters,
                                               kernel=(3, 3, 3),
                                               strides=(2, 2, 2))

        # Residual connection around the context module.
        context_output_layer = create_context_module(in_conv,
                                                     n_level_filters,
                                                     dropout_rate=dropout_rate)
        summation_layer = Add()([in_conv, context_output_layer])

        level_output_layers.append(summation_layer)
        current_layer = summation_layer

    # 1x1x1 projection of the deepest level, then collapse spatial dims.
    output_layer = create_convolution_block(summation_layer,
                                            n_level_filters,
                                            kernel=(1, 1, 1))
    pool1 = GlobalMaxPooling3D(data_format='channels_last')(output_layer)

    if ifbase == True:
        model = Model(inputs=inputs, outputs=pool1)
        return model
    else:
        # NOTE(review): Flatten on the already-2D pooled tensor is a no-op.
        flatten1 = Flatten()(pool1)
        if num_outputs > 1:
            dense = Dense(units=num_outputs,
                          kernel_initializer="he_normal",
                          activation="softmax",
                          kernel_regularizer=l2(kernel_reg_factor))(flatten1)
        else:
            dense = Dense(units=num_outputs,
                          kernel_initializer="he_normal",
                          activation="sigmoid",
                          kernel_regularizer=l2(kernel_reg_factor))(flatten1)
        model = Model(inputs=inputs, outputs=dense)
    model.compile(optimizer=optimizer(lr=initial_learning_rate),
                  loss=loss_function)
    return model
def res_next32(input_shape,
               initial_learning_rate=0.00001,
               batch_normalization=True,
               activation_name="sigmoid",
               activation=ReLU,
               opt='Adam'):
    """ResNeXt-style 3D binary classifier.

    A stem conv followed by four stages. Each stage has a projection half
    (a next-block plus a 1x1x1 shortcut-matching conv, strided on all but
    the first stage) and an identity half (a plain residual add), then the
    head is global max-pool -> dropout -> 1-unit sigmoid, compiled with
    focal loss under Adam or SGD.
    """
    base_filters = 16
    inputs = Input(input_shape)
    current_layer = inputs
    print(current_layer._keras_shape)
    layer1 = create_convolution_block(input_layer=current_layer,
                                      kernel=(3, 3, 3),
                                      n_filters=base_filters,
                                      padding='same',
                                      batch_normalization=batch_normalization,
                                      activation=activation)
    print(layer1._keras_shape)

    x = layer1
    # (next-block filter multiplier, whether the stage downsamples).
    # The shortcut-matching conv always carries twice the stage filters.
    for stage_mult, downsample in ((2, False), (4, True), (8, True),
                                   (16, True)):
        if downsample:
            projected = create_next_block(
                input_layer=x,
                n_filters=base_filters * stage_mult,
                strides=(2, 2, 2),
                padding='same',
                batch_normalization=batch_normalization,
                activation=activation)
            shortcut = create_convolution_block(
                input_layer=x,
                kernel=(1, 1, 1),
                strides=(2, 2, 2),
                n_filters=base_filters * stage_mult * 2,
                padding='same',
                batch_normalization=batch_normalization,
                activation=None)
        else:
            projected = create_next_block(
                input_layer=x,
                n_filters=base_filters * stage_mult,
                padding='same',
                batch_normalization=batch_normalization,
                activation=activation)
            shortcut = create_convolution_block(
                input_layer=x,
                kernel=(1, 1, 1),
                n_filters=base_filters * stage_mult * 2,
                padding='same',
                batch_normalization=batch_normalization,
                activation=None)
        x = create_elewise_block(input1=projected,
                                 input2=shortcut,
                                 activation=activation)
        # Identity half: plain residual add at the same width.
        residual = create_next_block(
            input_layer=x,
            n_filters=base_filters * stage_mult,
            padding='same',
            batch_normalization=batch_normalization,
            activation=activation)
        x = create_elewise_block(input1=residual,
                                 input2=x,
                                 activation=activation)
        print(x._keras_shape)

    layer7 = GlobalMaxPooling3D(data_format="channels_first")(x)
    print(layer7._keras_shape)
    layer7 = Dropout(rate=0.3)(layer7)
    layer8 = Dense(1, activation='sigmoid')(layer7)
    print(layer8._keras_shape)
    model = Model(inputs=inputs, outputs=layer8)
    if opt == 'Adam':
        model.compile(optimizer=Adam(lr=initial_learning_rate),
                      loss=[focal_loss_fixed],
                      metrics=['accuracy'])
    elif opt == 'SGD':
        model.compile(optimizer=SGD(lr=initial_learning_rate),
                      loss=[focal_loss_fixed],
                      metrics=['accuracy'])
    return model
Пример #15
0
def buildModel(inputShape, args):
    """Build a multi-output 3D CNN cancer classifier (Python 2 code).

    Produces a Model with a main 'cancer' sigmoid head plus two auxiliary
    heads ('cancer1', 'cancer2') taken from intermediate feature maps, or
    — when args.autoencoder is set — the main head plus a decoder output.

    NOTE(review): the args.autoencoder branch references `pool2`, which is
    only defined in commented-out code above, so taking that branch raises
    NameError as written. `fo`/`fi`/`firstPool`/`secondPool` are likewise
    consumed only by that (broken) branch.
    """

    print inputShape
    nChannels = inputShape[-1]

    input_img = Input(shape=inputShape)
    x = input_img

    if args.batchNorm: x = BatchNormalization()(x)

    # Filter counts for the (currently disabled) deeper encoder/decoder.
    fo = args.filters
    fi = fo * 2

    # NOTE(review): `b` is read from args but immediately overridden here,
    # and overridden again to 3 before its first use below.
    b = args.bconv
    b = 5

    firstPool = (4, 4, 4)
    secondPool = (2, 2, 2)

    #x = MaxPooling3D(pool_size=(2,2,2))(x)
    x = AveragePooling3D(pool_size=(2, 2, 2))(x)

    # First conv stage + auxiliary head 1.
    a = 5
    x = Convolution3D(4, (a, a, a), activation='relu', padding='same')(x)
    x = MaxPooling3D(name='pool1', pool_size=(3, 3, 3))(x)

    s1 = Dense(4, activation='sigmoid')(Flatten()(x))
    nodule1 = Dense(1, activation='sigmoid', name='cancer1')(s1)

    # Second conv stage + auxiliary head 2.
    b = 3
    x = Convolution3D(4, (b, b, b), activation='relu', padding='same')(x)
    x = MaxPooling3D(pool_size=(3, 3, 3))(x)

    s2 = Dense(4, activation='sigmoid')(Flatten()(x))
    nodule2 = Dense(1, activation='sigmoid', name='cancer2')(s2)

    #x = Convolution3D(fi, (3, 3, 3), activation='relu', padding='same')(pool1)
    #if args.doubleLayers: x = Convolution3D(fi, (3, 3, 3), activation='relu', padding='same')(x)
    #pool2 = MaxPooling3D(name='pool2', pool_size=secondPool)(x)

    #x = Convolution3D(fi, (3, 3, 3), activation='relu', padding='same')(pool2)
    #if args.doubleLayers: x = Convolution3D(fi, (3, 3, 3), activation='relu', padding='same')(x)
    #pool3 = MaxPooling3D(pool_size=(2,2,2))(x)

    #l = Convolution3D(16, (1, 1, 1), activation='relu', padding='same')(pool3)

    #x = Convolution3D(fi, (3, 3, 3), activation='relu', padding='same')(pool2)
    #if args.doubleLayers: x = Convolution3D(fi, (3, 3, 3), activation='relu', padding='same')(x)
    #pool3 = MaxPooling3D(name='pool3', pool_size=(2,2,2))(x)

    #x = Convolution3D(fi, (5, 5, 5), activation='relu', padding='same')(pool3)
    #pool4 = MaxPooling3D(pool_size=(2,2,2))(x)

    encoded = x
    encoder = Model(inputs=[input_img], outputs=[encoded])
    '''
	if args.flat=='flat': flat = Flatten()(encoded)
	else:
		#flat = GlobalAveragePooling3D()(encoded)
		flat = GlobalMaxPooling3D()(encoded)
	'''

    #flat = Flatten()(encoded)
    #flat = GlobalAveragePooling3D()(encoded)
    flat = GlobalMaxPooling3D()(encoded)

    if args.dropout: flat = Dropout(args.dropout)(flat)

    # Shared dense bottleneck feeding the main cancer head.
    shared = Dense(args.sharedNeurons, activation='sigmoid')(flat)
    #shared = Dense(args.sharedNeurons, bias_initializer='zero')(flat)
    #if args.dropout: shared = Dropout(args.dropout)(shared)
    nodule = Dense(1, activation='sigmoid', name='cancer')(shared)

    loss = {
        'cancer': 'binary_crossentropy',
        'cancer1': 'binary_crossentropy',
        'cancer2': 'binary_crossentropy'
    }
    metrics = {'cancer': 'accuracy'}

    if args.autoencoder:

        loss['imgOut'] = 'mse'
        metrics['imgOut'] = 'mae'

        # NOTE(review): `pool2` is undefined here (see docstring) — this
        # branch raises NameError until the encoder layers above are
        # restored from the commented-out code.
        x = UpSampling3D(size=secondPool)(pool2)
        #x = UpSampling3D(size=(5,5,5))(pool2)
        x = Convolution3D(fi, (3, 3, 3), activation='relu', padding='same')(x)
        if args.doubleLayers:
            x = Convolution3D(fi, (3, 3, 3), activation='relu',
                              padding='same')(x)

        #x = UpSampling3D()(x)
        #x = Convolution3D(fi, (3, 3, 3), activation='relu', padding='same')(x)
        #if args.doubleLayers: x = Convolution3D(fi, (3, 3, 3), activation='relu', padding='same')(x)

        x = UpSampling3D(size=firstPool)(x)
        #x = UpSampling3D(size=(3,3,3))(x)
        if args.doubleLayers:
            x = Convolution3D(fo, (3, 3, 3), activation='relu',
                              padding='same')(x)
        decoded = Convolution3D(nChannels, (3, 3, 3),
                                activation='relu',
                                padding='same',
                                name='imgOut')(x)

        model = Model(inputs=[input_img],
                      outputs=[nodule, decoded],
                      name='multiOut')

    else:

        model = Model(inputs=[input_img],
                      outputs=[nodule, nodule1, nodule2],
                      name='multiOut')

    print 'hidden layer shape: ', encoder.output_shape

    # optomizers: adadelta, sgd, rmsprop, adam, nadam

    model.compile(optimizer=args.optimizer, loss=loss, metrics=metrics)

    print model.summary()

    return model
def model_3d_1(input_shape,
               initial_learning_rate=0.00001,
               batch_normalization=True,
               instance_normalization=False,
               activation_name="sigmoid",
               opt='Adam'):
    """VGG-like 3D binary classifier.

    Four double-conv stages (64/128/256/512 filters, LeakyReLU), with a
    2x max-pool after each of the first three stages, then global
    max-pool -> dropout -> 1-unit sigmoid, compiled with binary
    cross-entropy (Adam) or focal loss (SGD).
    """
    base_fiters = 64
    inputs = Input(input_shape)
    current_layer = inputs
    print(current_layer._keras_shape)

    x = current_layer
    for stage in range(1, 5):
        stage_filters = base_fiters * (2 ** (stage - 1))
        # Two identical conv blocks per stage, named '<stage>_1'/'<stage>_2'.
        for half in (1, 2):
            x = create_convolution_block(
                input_layer=x,
                kernel=(3, 3, 3),
                n_filters=stage_filters,
                name='%d_%d' % (stage, half),
                padding='same',
                batch_normalization=batch_normalization,
                instance_normalization=instance_normalization,
                activation=LeakyReLU)
        print(x._keras_shape)
        # No pooling after the last stage; the head pools globally.
        if stage < 4:
            x = MaxPooling3D(pool_size=(2, 2, 2),
                             name='pool%d' % stage)(x)
            print('pool%d:' % stage, x._keras_shape)
    #############
    layer7 = GlobalMaxPooling3D(data_format="channels_first",
                                name='Gpool')(x)
    print(layer7._keras_shape)
    layer7 = Dropout(rate=0.3, name='dropout1')(layer7)
    layer8 = Dense(1, activation='sigmoid', name='dense1')(layer7)
    print(layer8._keras_shape)
    model = Model(inputs=inputs, outputs=layer8)
    if opt == 'Adam':
        model.compile(optimizer=Adam(lr=initial_learning_rate),
                      loss="binary_crossentropy",
                      metrics=['accuracy'])
    elif opt == 'SGD':
        model.compile(optimizer=SGD(lr=initial_learning_rate),
                      loss=[focal_loss(alpha=.25, gamma=2)],
                      metrics=['accuracy'])
    return model
Пример #17
0
def YOPO_feature(image_size):
    """Build the YOPO feature-extraction model.

    The network is a stack of ten Conv3D -> ELU -> BatchNormalization
    stages whose filter counts grow linearly (64, 80, ..., 208).  A
    GlobalMaxPooling3D descriptor is taken after every stage and the ten
    descriptors are concatenated into a single feature vector, which is
    projected to 1024 dimensions by a linear Dense layer.

    Args:
        image_size: edge length of the cubic, single-channel input volume.

    Returns:
        A Keras Model mapping an (image_size, image_size, image_size, 1)
        volume to a 1024-dimensional feature vector.
    """
    kernel_initializer = keras.initializers.orthogonal()
    bias_initializer = keras.initializers.zeros()

    input_shape = (image_size, image_size, image_size, 1)
    main_input = Input(shape=input_shape, name='input_1')

    x = GaussianDropout(0.5)(main_input)

    # One global-max descriptor per convolutional stage.  Filter counts
    # step by 16: 64, 80, ..., 208 (ten stages total).
    pooled = []
    for n_filters in range(64, 209, 16):
        x = Conv3D(n_filters, (3, 3, 3), dilation_rate=(1, 1, 1),
                   padding='valid',
                   kernel_initializer=kernel_initializer,
                   bias_initializer=bias_initializer)(x)
        x = ELU()(x)
        x = BatchNormalization()(x)
        pooled.append(GlobalMaxPooling3D()(x))

    m = Concatenate()(pooled)
    m = BatchNormalization()(m)

    # Linear (no activation) projection to the final feature vector.
    out = Dense(1024, name='fc2', kernel_initializer=kernel_initializer,
                bias_initializer=bias_initializer)(m)

    # `inputs`/`outputs` replace the Keras-1 `input`/`output` keyword
    # arguments, which modern Keras no longer accepts.
    mod = keras.models.Model(inputs=main_input, outputs=out)

    return mod
Пример #18
0
 def protein_network(t):
     """Protein-branch feature extractor.

     Applies two `Conv3D_layer` blocks (16 then 32 filters; the helper
     is defined elsewhere in this file) and collapses the spatial axes
     with global max pooling — presumably yielding one value per final
     filter, but confirm against `Conv3D_layer`'s definition.

     NOTE(review): the one-space indent on this def looks like a paste
     artifact in the surrounding file — confirm the intended nesting.
     """
     t = Conv3D_layer(filters=16, kernel_size=4)(t)
     t = Conv3D_layer(filters=32, kernel_size=6)(t)
     # channels_last: pool over the three spatial axes, keep channels.
     t = GlobalMaxPooling3D(data_format='channels_last')(t)
     return t
Пример #19
0
def _create_se_resnet(num_outputs,
                      img_input,
                      include_top,
                      initial_conv_filters,
                      filters,
                      depth,
                      width,
                      bottleneck,
                      pooling,
                      activation="softmax",
                      padding="same"):
    """Assemble an SE-ResNet body on top of `img_input`.

    Args:
        num_outputs: units in the optional classification head
        img_input: input tensor
        include_top: when True, append global average pooling plus a
            Dense head; otherwise apply `pooling` and return features
        initial_conv_filters: filter count of the stem convolution
        filters: per-stage filter counts, e.g. [64, 128, 256, 512]
        depth: per-stage residual-unit counts, e.g.
            ResNet-50  = [3, 4, 6, 3]
            ResNet-101 = [3, 6, 23, 3]
            ResNet-152 = [3, 8, 36, 3]
        width: width multiplier (Wide ResNet)
        bottleneck: use bottleneck residual units to cut computation
        weight_decay: l2 regularization strength
        pooling: 'avg', 'max' or None — feature-extraction pooling used
            only when `include_top` is False
        activation: activation of the classification head
        padding: padding mode for the stem conv and pool

    Returns: a Keras tensor
    """
    bn_axis = 1 if K.image_data_format() == 'channels_first' else -1
    units_per_stage = list(depth)

    def residual_unit(tensor, n_filters, **kwargs):
        # Dispatch to the configured residual-unit flavour.
        if bottleneck:
            return _resnet_bottleneck_block(tensor, n_filters, width, **kwargs)
        return _resnet_block(tensor, n_filters, width, **kwargs)

    # Stem: strided 7x7x7 convolution followed by max pooling.
    x = Conv3D(initial_conv_filters, (7, 7, 7),
               padding=padding,
               use_bias=False,
               strides=(2, 2, 2),
               kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay))(img_input)
    x = MaxPooling3D((3, 3, 3), strides=(2, 2, 2), padding=padding)(x)

    # Stage 0 keeps resolution; every later stage opens with a
    # stride-2 unit that halves each spatial dimension.
    for stage, n_units in enumerate(units_per_stage):
        for unit in range(n_units):
            if stage > 0 and unit == 0:
                x = residual_unit(x, filters[stage], strides=(2, 2, 2))
            else:
                x = residual_unit(x, filters[stage])

    x = BatchNormalization(axis=bn_axis)(x)
    x = Activation('relu')(x)

    if include_top:
        x = GlobalAveragePooling3D()(x)
        return Dense(num_outputs,
                     use_bias=False,
                     kernel_regularizer=l2(weight_decay),
                     activation=activation)(x)

    if pooling == 'avg':
        x = GlobalAveragePooling3D()(x)
    elif pooling == 'max':
        x = GlobalMaxPooling3D()(x)
    return x
Пример #20
0
def c3d(x):
    """C3D-style backbone with three 7-way softmax heads.

    Two auxiliary classifiers branch off intermediate feature maps and
    a third head sits on the deepest features; all three predictions
    (out1, out2, out3) are returned for deep supervision.

    NOTE(review): `channel_axis` is a module-level name defined
    elsewhere in this file.  The 648-filter conv below — which also
    lacks the BatchNorm/ReLU every sibling conv gets — looks like a
    typo for 64; confirm against the intended architecture before
    changing it (left as-is here).
    """
    # Stage 1: two stride-2 convs, overall 4x spatial downsampling.
    x = Conv3D(64, (3, 3, 3), strides=(2, 2, 2), padding='same',
               kernel_initializer='he_normal', data_format='channels_last')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    x = Conv3D(648, (3, 3, 3), strides=(2, 2, 2), padding='same',
               kernel_initializer='he_normal', data_format='channels_last')(x)

    # Stage 2: 128 channels, ends with a stride-2 downsample.
    x = Conv3D(128, (3, 3, 3), strides=(1, 1, 1), padding='same',
               kernel_initializer='he_normal', data_format='channels_last')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    stage2 = Conv3D(128, (3, 3, 3), strides=(2, 2, 2), padding='same',
                    kernel_initializer='he_normal', data_format='channels_last')(x)

    # First auxiliary head, fed from stage-2 features.
    aux1 = Conv3D(256, (3, 3, 3), strides=(1, 1, 1), padding='same',
                  kernel_initializer='he_normal', data_format='channels_last')(stage2)
    aux1 = BatchNormalization(axis=channel_axis)(aux1)
    aux1 = Activation('relu')(aux1)
    aux1 = GlobalMaxPooling3D(data_format='channels_last')(aux1)
    out1 = Dense(7, activation='softmax')(aux1)

    # Stage 3: 256 -> 512 channels, ends with a stride-2 downsample.
    x = Conv3D(256, (3, 3, 3), strides=(1, 1, 1), padding='same',
               kernel_initializer='he_normal', data_format='channels_last')(stage2)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    x = Conv3D(512, (3, 3, 3), strides=(1, 1, 1), padding='same',
               kernel_initializer='he_normal', data_format='channels_last')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    stage3 = Conv3D(512, (3, 3, 3), strides=(2, 2, 2), padding='same',
                    kernel_initializer='he_normal', data_format='channels_last')(x)

    # Second auxiliary head, fed from stage-3 features.
    aux2 = Conv3D(512, (3, 3, 3), strides=(1, 1, 1), padding='same',
                  kernel_initializer='he_normal', data_format='channels_last')(stage3)
    aux2 = BatchNormalization(axis=channel_axis)(aux2)
    aux2 = Activation('relu')(aux2)
    aux2 = GlobalMaxPooling3D(data_format='channels_last')(aux2)
    out2 = Dense(7, activation='softmax')(aux2)

    # Stage 4: two 512-channel convs, ends with a stride-2 downsample.
    x = Conv3D(512, (3, 3, 3), strides=(1, 1, 1), padding='same',
               kernel_initializer='he_normal', data_format='channels_last')(stage3)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    x = Conv3D(512, (3, 3, 3), strides=(1, 1, 1), padding='same',
               kernel_initializer='he_normal', data_format='channels_last')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    stage4 = Conv3D(512, (3, 3, 3), strides=(2, 2, 2), padding='same',
                    kernel_initializer='he_normal', data_format='channels_last')(x)

    # Main head: two 1024-channel convs on the deepest features.
    head = Conv3D(1024, (3, 3, 3), strides=(1, 1, 1), padding='same',
                  kernel_initializer='he_normal', data_format='channels_last')(stage4)
    head = BatchNormalization(axis=channel_axis)(head)
    head = Activation('relu')(head)
    head = Conv3D(1024, (3, 3, 3), strides=(1, 1, 1), padding='same',
                  kernel_initializer='he_normal', data_format='channels_last')(head)
    head = BatchNormalization(axis=channel_axis)(head)
    head = Activation('relu')(head)
    head = GlobalMaxPooling3D(data_format='channels_last')(head)
    out3 = Dense(7, activation='softmax')(head)

    return out1, out2, out3
Пример #21
0
def __create_dense_net(nb_classes, img_input, include_top, depth=40, nb_dense_block=3,
                       growth_rate=12, nb_filter=-1, nb_layers_per_block=-1,
                       bottleneck=False, reduction=0.0, dropout_rate=None,
                       weight_decay=1e-4, subsample_initial_block=False, pooling=None,
                       activation='softmax', transition_pooling='avg'):
    ''' Build the DenseNet model

    # Arguments
        nb_classes: number of classes
        img_input: input tensor (e.g. the output of an `Input` layer)
        include_top: flag to include the final Dense layer
        depth: number or layers
        nb_dense_block: number of dense blocks to add to end (generally = 3)
        growth_rate: number of filters to add per dense block
        nb_filter: initial number of filters. Default -1 indicates initial number
            of filters is 2 * growth_rate
        nb_layers_per_block: number of layers in each dense block.
                Can be a -1, positive integer or a list.
                If -1, calculates nb_layer_per_block from the depth of the network.
                If positive integer, a set number of layers per dense block.
                If list, nb_layer is used as provided. Note that the list size
                must equal nb_dense_block (the last entry is used for the
                final dense block).
        bottleneck: add bottleneck blocks
        reduction: reduction factor of transition blocks. Note : reduction value is
            inverted to compute compression
        dropout_rate: dropout rate
        weight_decay: weight decay rate
        subsample_initial_block: Changes model type to suit different datasets.
            Should be set to True for ImageNet, and False for CIFAR datasets.
            When set to True, the initial convolution will be strided and
            adds a MaxPooling3D before the initial dense block.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model
                will be the 5D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a
                2D tensor.
            - `max` means that global max pooling will
                be applied.
        activation: Type of activation at the top layer. Can be one of 'softmax' or
            'sigmoid'. Note that if sigmoid is used, classes must be 1.
        transition_pooling: `avg` for avg pooling (default), `max` for max pooling,
            None for no pooling during scale transition blocks. Please note that this
            default differs from the DenseNetFCN paper in accordance with the DenseNet
            paper.

    # Returns
        a keras tensor

    # Raises
        ValueError: in case of invalid argument for `reduction`
            or `nb_layers_per_block`
    '''
    with K.name_scope('DenseNet'):
        concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

        if reduction != 0.0:
            if not (reduction <= 1.0 and reduction > 0.0):
                raise ValueError('`reduction` value must lie between 0.0 and 1.0')

        # layers in each dense block
        if type(nb_layers_per_block) is list or type(nb_layers_per_block) is tuple:
            nb_layers = list(nb_layers_per_block)  # Convert tuple to list

            if len(nb_layers) != nb_dense_block:
                # Fixed message: the constraint is on the list passed as
                # `nb_layers_per_block`, whose length must equal
                # `nb_dense_block` (the old text named the wrong argument).
                raise ValueError('If `nb_layers_per_block` is a list, its length '
                                 'must equal `nb_dense_block`.')

            # Last entry feeds the final dense block; the rest feed the
            # nb_dense_block - 1 blocks that are followed by transitions.
            final_nb_layer = nb_layers[-1]
            nb_layers = nb_layers[:-1]
        else:
            if nb_layers_per_block == -1:
                assert (depth - 4) % 3 == 0, ('Depth must be 3 N + 4 '
                                              'if nb_layers_per_block == -1')
                count = int((depth - 4) / 3)

                if bottleneck:
                    count = count // 2

                nb_layers = [count for _ in range(nb_dense_block)]
                final_nb_layer = count
            else:
                final_nb_layer = nb_layers_per_block
                nb_layers = [nb_layers_per_block] * nb_dense_block

        # compute initial nb_filter if -1, else accept users initial nb_filter
        if nb_filter <= 0:
            nb_filter = 2 * growth_rate

        # compute compression factor
        compression = 1.0 - reduction

        # Initial convolution: larger, strided kernel for ImageNet-style
        # inputs; small unstrided kernel otherwise.
        if subsample_initial_block:
            initial_kernel = (7, 7, 7)
            initial_strides = (2, 2, 2)
        else:
            initial_kernel = (3, 3, 3)
            initial_strides = (1, 1, 1)

        x = Conv3D(nb_filter, initial_kernel, kernel_initializer='he_normal',
                   padding='same', name='initial_Conv3D', strides=initial_strides,
                   use_bias=False, kernel_regularizer=l2(weight_decay))(img_input)

        if subsample_initial_block:
            x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5,
                                   name='initial_bn')(x)
            x = Activation('relu')(x)
            x = MaxPooling3D((3, 3, 3), strides=(2, 2, 2), padding='same')(x)

        # Add dense blocks, each followed by a compressing transition.
        for block_idx in range(nb_dense_block - 1):
            x, nb_filter = __dense_block(x, nb_layers[block_idx], nb_filter,
                                         growth_rate, bottleneck=bottleneck,
                                         dropout_rate=dropout_rate,
                                         weight_decay=weight_decay,
                                         block_prefix='dense_%i' % block_idx)
            # add transition_block
            x = __transition_block(x, nb_filter, compression=compression,
                                   weight_decay=weight_decay,
                                   block_prefix='tr_%i' % block_idx,
                                   transition_pooling=transition_pooling)
            nb_filter = int(nb_filter * compression)

        # The last dense_block does not have a transition_block
        x, nb_filter = __dense_block(x, final_nb_layer, nb_filter, growth_rate,
                                     bottleneck=bottleneck, dropout_rate=dropout_rate,
                                     weight_decay=weight_decay,
                                     block_prefix='dense_%i' % (nb_dense_block - 1))

        x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5, name='final_bn')(x)
        x = Activation('relu')(x)

        if include_top:
            if pooling == 'avg':
                x = GlobalAveragePooling3D()(x)
            elif pooling == 'max':
                x = GlobalMaxPooling3D()(x)
            x = Dense(nb_classes, activation=activation)(x)
        else:
            if pooling == 'avg':
                x = GlobalAveragePooling3D()(x)
            elif pooling == 'max':
                x = GlobalMaxPooling3D()(x)

        return x
Пример #22
0
 def ligand_network(t):
     """Ligand-branch feature extractor.

     Applies two `Conv3D_layer` blocks (8 then 16 filters; the helper
     is defined elsewhere in this file) and collapses the spatial axes
     with global max pooling — presumably yielding one value per final
     filter, but confirm against `Conv3D_layer`'s definition.

     NOTE(review): the one-space indent on this def looks like a paste
     artifact in the surrounding file — confirm the intended nesting.
     """
     t = Conv3D_layer(filters=8, kernel_size=2)(t)
     t = Conv3D_layer(filters=16, kernel_size=4)(t)
     # channels_last: pool over the three spatial axes, keep channels.
     t = GlobalMaxPooling3D(data_format='channels_last')(t)
     return t
Пример #23
0
def model3d_layers(sz=48, alpha=1.5, do_features=False):
    """Return the flat layer list for the 3D classifier.

    The string "BatchNormalization" is appended as a placeholder after
    every convolution; the model builder is expected to substitute a
    fresh BatchNormalization layer for it.  `sz` is the base channel
    count and is widened by a factor of `alpha` at each stage.
    NOTE(review): `do_features` is accepted but never used here —
    confirm whether the caller relies on it.
    """
    layers = []

    def conv3dparams(**replace_params):
        # Shared conv defaults; callers may override any key.
        params = {
            'activation': ELU(),
            'border_mode': 'valid',
            'init': 'he_normal'
        }
        params.update(replace_params)
        return params

    def add_conv(width, k):
        # Convolution (k x k x k) followed by the BN placeholder.
        layers.append(Convolution3D(width, k, k, k, **conv3dparams()))
        layers.append("BatchNormalization")

    # Stage 0: 3x3x3 conv plus a 1x1x1 bottleneck.
    add_conv(sz, 3)
    add_conv(sz, 1)

    # Three structurally identical middle stages, each widened by
    # `alpha` and closed by spatial dropout (heavier on the last one).
    for drop_rate in (0.2, 0.2, 0.5):
        sz = int(sz * alpha)
        add_conv(sz, 3)
        add_conv(sz, 1)
        add_conv(sz, 3)
        add_conv(sz, 1)
        layers.append(SpatialDropout3D(drop_rate))

    # Head: a 2x2x2 conv and two 1x1x1 convs, then a linear 1-channel
    # conv reduced to a scalar by global max pooling and squashed to
    # (0, 1) with a sigmoid.
    sz = int(sz * alpha)
    add_conv(sz, 2)
    add_conv(sz, 1)
    add_conv(sz, 1)
    layers.append(
        Convolution3D(1, 1, 1, 1,
                      **conv3dparams(activation='linear', border_mode='same')))

    layers.append(GlobalMaxPooling3D())
    layers.append(Activation('sigmoid'))

    return layers
Пример #24
0
                  name='conv1')(img_input)
x = MaxPooling3D(pool_size=(3, 3, 3),
                 strides=(2, 2, 2),
                 name='maxpool1',
                 padding="valid")(x)

x = firemodule(x, (16, 64, 64), name="fire2")
x = firemodule(x, (16, 64, 64), name="fire3")

x = MaxPooling3D(pool_size=(3, 3, 3),
                 strides=(2, 2, 2),
                 name='maxpool3',
                 padding="valid")(x)
x = firemodule(x, (32, 128, 128), name="fire4")
x = firemodule(x, (32, 128, 128), name="fire5")
x = MaxPooling3D(pool_size=(3, 3, 3),
                 strides=(2, 2, 2),
                 name='maxpool5',
                 padding="valid")(x)
x = firemodule(x, (48, 192, 192), name="fire6")
x = firemodule(x, (48, 192, 192), name="fire7")
x = firemodule(x, (64, 256, 256), name="fire8")
x = firemodule(x, (64, 256, 256), name="fire9")

x = GlobalMaxPooling3D(name="maxpool10")(x)
x = Dense(3, init='normal')(x)
x = Activation('softmax')(x)

model = Model(img_input, x, name="squeezenet")
model.summary()
Пример #25
0
def InceptionResNetV2(input_shape=None, classes=3):
    """Build a 3D Inception-ResNet-v2 feature extractor.

    Pipeline: stem -> Inception-A mixture (mixed_5b) -> 10x block35 ->
    Reduction-A (mixed_6a) -> 20x block17 -> 10x block8 -> 1x1x1 conv
    (conv_7b) -> global max pooling.

    Args:
        input_shape: shape tuple for the input volume.
        classes: unused here; kept for signature compatibility with callers.

    Returns:
        A Keras Model producing the pooled conv_7b feature vector.
    """
    _inputs = Input(shape=input_shape)
    x = stem_block(_inputs)

    # Mixed 5b (Inception-A block): 17x21x14x320
    branch_0 = conv3d_bn(x, 96, 1)
    branch_1 = conv3d_bn(x, 48, 1)
    branch_1 = conv3d_bn(branch_1, 64, 5)
    branch_2 = conv3d_bn(x, 64, 1)
    branch_2 = conv3d_bn(branch_2, 96, 3)
    branch_2 = conv3d_bn(branch_2, 96, 3)
    branch_pool = AveragePooling3D(3, strides=1, padding='same')(x)
    branch_pool = conv3d_bn(branch_pool, 64, 1)
    branches = [branch_0, branch_1, branch_2, branch_pool]
    x = Concatenate(name='mixed_5b')(branches)

    # 10x block35 (Inception-ResNet-A block): 17x21x14x320
    for block_idx in range(1, 11):
        x = inception_resnet_block(x,
                                   scale=0.17,
                                   block_type='block35',
                                   block_idx=block_idx)

    # Mixed 6a (Reduction-A block)
    branch_0 = conv3d_bn(x, 384, 3, strides=2, padding='valid')
    branch_1 = conv3d_bn(x, 256, 1)
    branch_1 = conv3d_bn(branch_1, 256, 3)
    branch_1 = conv3d_bn(branch_1, 384, 3, strides=2, padding='valid')
    branch_pool = MaxPooling3D(3, strides=2, padding='valid')(x)
    branches = [branch_0, branch_1, branch_pool]
    x = Concatenate(name='mixed_6a')(branches)

    # 20x block17 (Inception-ResNet-B block): 7 x 9 x 5 x 1088
    for block_idx in range(1, 21):
        x = inception_resnet_block(x,
                                   scale=0.1,
                                   block_type='block17',
                                   block_idx=block_idx)

    # BUG FIX: the original re-applied Concatenate(name='mixed_6a') to
    # the stale `branches` list here, which both duplicated the layer
    # name (a Keras build error) and discarded all block17 outputs.
    # That line has been removed.  NOTE(review): the canonical
    # architecture inserts a Reduction-B ("mixed_7a") block at this
    # point; it is still absent from this implementation.

    # 10x block8 (Inception-ResNet-C block): 7 x 9 x 5 x 1088
    for block_idx in range(1, 10):
        x = inception_resnet_block(x,
                                   scale=0.2,
                                   block_type='block8',
                                   block_idx=block_idx)

    x = inception_resnet_block(x,
                               scale=1.,
                               activation=None,
                               block_type='block8',
                               block_idx=10)

    # Final convolution block: 7 x 9 x 5 x 1536
    x = conv3d_bn(x, 1800, 1, name='conv_7b')
    x = GlobalMaxPooling3D()(x)

    # Create model
    model = Model(_inputs, x, name='inception_resnet_v2_3d')

    return model
Пример #26
0
def dual_path_net(initial_conv_filters,
                  filter_increment,
                  depth,
                  cardinality,
                  width,
                  pooling='max-avg',
                  bias_flag=False):
    '''Build a 3D Dual Path Network (DPN) binary classifier.

    Args:
        initial_conv_filters: number of features for the initial convolution
        filter_increment: number of filters incremented per block, defined as a list.
            DPN-92  = [16, 32, 24, 128]
            DPN-98  = [16, 32, 32, 128]
            DPN-131 = [16, 32, 32, 128]
            DPN-107 = [20, 64, 64, 128]
        depth: number or layers in the each block, defined as a list.
            DPN-92  = [3, 4, 20, 3]
            DPN-98  = [3, 6, 20, 3]
            DPN-131 = [4, 8, 28, 3]
            DPN-107 = [4, 8, 20, 3]
        cardinality: number of groups in the grouped convolutions
        width: number of kernels per group in the grouped convolutions;
            together with `cardinality` this fixes the grouped-conv width,
            which is why `pointwise_filters_a` and `grouped_conv_filters_b`
            below receive the same value
        pooling: Optional pooling mode for feature extraction
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
            - `max-avg` means that both global average and global max
                pooling will be applied to the output of the last
                convolution layer
        bias_flag: whether convolutions and the final Dense layer use biases
    Returns: a Keras Model
    '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    N = list(depth)
    base_filters = 256

    # input set
    img_input = Input(shape=(280, 280, 16, 1))
    # block 1 (initial conv block)
    x = _initial_conv_block_inception(img_input,
                                      initial_conv_filters,
                                      bias_flag=bias_flag)
    print('BLOCK 1 init shape :', x.shape)
    # block 2 (projection block)
    filter_inc = filter_increment[0]
    # filter_increment: number of filters incremented per block, defined as a list.
    # DPN-92  = [16, 32, 24, 128]
    filters = int(cardinality * width)

    x = _dual_path_block(x,
                         pointwise_filters_a=filters,
                         grouped_conv_filters_b=filters,
                         pointwise_filters_c=base_filters,
                         filter_increment=filter_inc,
                         cardinality=cardinality,
                         block_type='projection',
                         bias_flag=bias_flag)

    for i in range(N[0] - 1):
        x = _dual_path_block(x,
                             pointwise_filters_a=filters,
                             grouped_conv_filters_b=filters,
                             pointwise_filters_c=base_filters,
                             filter_increment=filter_inc,
                             cardinality=cardinality,
                             block_type='normal',
                             bias_flag=bias_flag)

    print("BLOCK 1 out shape : res_path:", x[0].shape, " vs.  dense_path",
          x[1].shape)
    # remaining blocks
    for k in range(1, len(N)):

        filter_inc = filter_increment[k]
        # Entering the next major block (not an individual dpn block):
        # the grouped-conv width doubles ...
        filters *= 2
        # ... and so does base_filters, which plays the role of the
        # dense-path transition width — like DenseNet's transition
        # blocks, it grows as the network gets deeper.
        base_filters *= 2

        x = _dual_path_block(x,
                             pointwise_filters_a=filters,
                             grouped_conv_filters_b=filters,
                             pointwise_filters_c=base_filters,
                             filter_increment=filter_inc,
                             cardinality=cardinality,
                             block_type='downsample',
                             bias_flag=bias_flag)
        print("BLOCK", (k + 1), "d_sample shape : res_path:", x[0].shape,
              " vs.  dense_path", x[1].shape)
        for i in range(N[k] - 1):
            x = _dual_path_block(x,
                                 pointwise_filters_a=filters,
                                 grouped_conv_filters_b=filters,
                                 pointwise_filters_c=base_filters,
                                 filter_increment=filter_inc,
                                 cardinality=cardinality,
                                 block_type='normal',
                                 bias_flag=bias_flag)

        print("BLOCK", (k + 1), "out shape : res_path:", x[0].shape,
              " vs.  dense_path", x[1].shape)

    # Merge the residual and dense paths before pooling.
    x = concatenate(x, axis=channel_axis)
    print("CONCAT out shape : ", x.shape)

    if pooling == 'avg':
        x = GlobalAveragePooling3D(data_format='channels_last')(x)
    elif pooling == 'max':
        x = GlobalMaxPooling3D(data_format='channels_last')(x)
    elif pooling == 'max-avg':
        # Average of global-max and global-average descriptors.
        a = GlobalMaxPooling3D(data_format='channels_last')(x)
        b = GlobalAveragePooling3D(data_format='channels_last')(x)
        x = add([a, b])
        x = Lambda(lambda z: 0.5 * z)(x)

    print("GApooling shape:", x.shape)
    out_drop = Dropout(rate=0.3)(x)
    out = Dense(1, name='fc1', use_bias=bias_flag)(out_drop)
    print("out shape:", out.shape)
    output = Activation(activation='sigmoid')(out)

    # `inputs`/`outputs` replace the Keras-1 `input`/`output` keyword
    # arguments, which modern Keras no longer accepts.
    model = Model(inputs=img_input, outputs=output)

    return model
Пример #27
0
def c3da_ae(x):
    """Build a 3-D convolutional autoencoder-style graph with two heads.

    Encodes the input tensor ``x`` through four strided-convolution
    stages, then produces:

    * ``out1`` — a single-channel sigmoid volume reconstructed (via three
      2x upsampling stages) from the intermediate feature map taken after
      the third encoder stage;
    * ``out2`` — a 7-way softmax vector computed from the deepest encoder
      features via global max pooling.

    Returns the tuple ``(out1, out2)``.

    NOTE(review): relies on the module-level ``channel_axis`` and on the
    Keras layers imported at file scope; assumes ``x`` is a 5-D
    channels-last tensor — confirm against the caller.
    """
    def conv3(tensor, n_filters, strides):
        # Bare 3x3x3 'same' convolution; BN/activation (if any) is the
        # caller's responsibility — the strided downsampling convs in this
        # architecture deliberately omit them.
        return Conv3D(n_filters, (3, 3, 3), strides=strides, padding='same',
                      kernel_initializer='he_normal',
                      data_format='channels_last')(tensor)

    def conv_bn_relu(tensor, n_filters, strides=(1, 1, 1)):
        # Convolution followed by batch-norm and ReLU (the non-strided
        # building block used throughout encoder and decoder).
        tensor = conv3(tensor, n_filters, strides)
        tensor = BatchNormalization(axis=channel_axis)(tensor)
        return Activation('relu')(tensor)

    # ---- encoder ----
    feat = conv_bn_relu(x, 32)
    feat = conv3(feat, 32, (2, 2, 2))           # downsample, no BN/ReLU

    feat = conv_bn_relu(feat, 64)
    feat = conv3(feat, 64, (2, 2, 2))           # downsample, no BN/ReLU

    feat = conv_bn_relu(feat, 128)
    feat = conv_bn_relu(feat, 128)
    skip = conv3(feat, 128, (2, 2, 2))          # 'x_1' in the original: feeds decoder-1

    feat = conv_bn_relu(skip, 256, (2, 1, 1))   # extra depth-axis stride here
    feat = conv_bn_relu(feat, 256)
    bottleneck = conv3(feat, 256, (2, 2, 2))    # deepest features: feed the softmax head

    # ---- decoder-1: volumetric reconstruction head (from the skip tensor) ----
    dec = UpSampling3D(size=(2, 2, 2), data_format='channels_last')(skip)
    dec = conv_bn_relu(dec, 128)
    dec = conv_bn_relu(dec, 64)

    dec = UpSampling3D(size=(2, 2, 2), data_format='channels_last')(dec)
    dec = conv_bn_relu(dec, 32)

    dec = UpSampling3D(size=(2, 2, 2), data_format='channels_last')(dec)
    dec = conv3(dec, 1, (1, 1, 1))
    dec = BatchNormalization(axis=channel_axis)(dec)
    out1 = Activation('sigmoid')(dec)

    # ---- decoder-2: 7-class softmax head (from the bottleneck) ----
    out2 = GlobalMaxPooling3D(data_format='channels_last')(bottleneck)
    out2 = Dense(7, activation='softmax')(out2)

    return out1, out2