Example #1
def create_model(inp_shape):
    inp = layers.Input(shape=inp_shape)
    x = layers.Conv3D(64, kernel_size=(7, 7, 7), strides=(2, 2, 2),
                      padding='same')(inp)
    x = add_common_layers(x)

    x = layers.MaxPool3D(pool_size=(3, 3, 3), strides=(2, 2, 2), padding='same')(x)
    for i in range(3):
        project_shortcut = (i == 0)
        x = residual_block(x, 128, 256, project_shortcut=project_shortcut)

    for i in range(4):
        # down-sampling is performed by conv3_1, conv4_1, and conv5_1 with a stride of 2
        strides = (2, 2, 2) if i == 0 else (1, 1, 1)
        x = residual_block(x, 256, 512, strides=strides)

    for i in range(6):
        strides = (2, 2, 2) if i == 0 else (1, 1, 1)
        x = residual_block(x, 512, 1024, strides=strides)

    for i in range(3):
        strides = (2, 2, 2) if i == 0 else (1, 1, 1)
        x = residual_block(x, 1024, 2048, strides=strides)

    x = layers.GlobalAveragePooling3D()(x)
    x = layers.Dense(16)(x)
    x = Sparse(np.random.randint(16, size=32).reshape((16, 2)))(x)  # 32 values reshape cleanly to (16, 2)
    x = layers.Dense(1)(x)

    model = models.Model(inputs=[inp], outputs=[x])
    print(model.summary())
    return model
Example #2
def get_model(width=64, height=64, depth=16):
    """Build a 3D convolutional neural network model."""

    inputs = keras.Input((depth, width, height, 3))

    x = layers.Conv3D(filters=64, kernel_size=3, activation="relu", padding="same")(inputs)
    x = layers.MaxPool3D(pool_size=2)(x)
    x = layers.BatchNormalization()(x)

    x = layers.Conv3D(filters=64, kernel_size=3, activation="relu", padding="same")(x)
    x = layers.MaxPool3D(pool_size=2)(x)
    x = layers.BatchNormalization()(x)

    x = layers.Conv3D(filters=128, kernel_size=3, activation="relu", padding="same")(x)
    x = layers.MaxPool3D(pool_size=2)(x)
    x = layers.BatchNormalization()(x)

    x = layers.Conv3D(filters=256, kernel_size=3, activation="relu", padding="same")(x)
    x = layers.MaxPool3D(pool_size=2)(x)
    x = layers.BatchNormalization()(x)

    x = layers.GlobalAveragePooling3D()(x)
    x = layers.Dense(units=512, activation="relu")(x)
    x = layers.Dropout(0.3)(x)

    outputs = layers.Dense(units=27, activation="softmax")(x)

    # Define the model.
    model = keras.Model(inputs, outputs, name="3dcnn")
    return model
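A minimal usage sketch for the builder above (the compile settings and the random test volume are illustrative and not part of the original source; TensorFlow's bundled Keras is assumed):

import numpy as np
from tensorflow import keras

model = get_model(width=64, height=64, depth=16)
model.compile(optimizer=keras.optimizers.Adam(1e-4),
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
model.summary()

# one random volume just to confirm the forward pass and output shape
dummy = np.random.rand(1, 16, 64, 64, 3).astype("float32")
print(model.predict(dummy).shape)  # (1, 27)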
Example #3
def DenseNet3D(blocks, input_shape=None, classes=1000):
    img_input = layers.Input(shape=input_shape)
    bn_axis = 4  # channels-last data format is assumed

    # Conv Layer 1
    x = layers.ZeroPadding3D(padding=((3, 3), (3, 3), (3, 3)))(img_input)
    x = layers.Conv3D(64, 7, strides=2, use_bias=False, name='conv1/conv')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  epsilon=1.001e-5,
                                  name='conv1/bn')(x)
    x = layers.Activation('relu', name='conv1/relu')(x)
    x = layers.ZeroPadding3D(padding=((1, 1), (1, 1), (1, 1)))(x)
    x = layers.MaxPooling3D(3, strides=2, name='pool1')(x)

    # Dense Blocks
    for i, block in enumerate(blocks):
        x = dense_block3D(x, block, name='conv' + str(i + 2))
        if i < len(blocks) - 1:
            x = transition_block3D(x, 0.5, name='pool' + str(i + 2))

    # Final Layers
    x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name='bn')(x)
    x = layers.GlobalAveragePooling3D(name='avg_pool')(x)
    x = layers.Dense(classes, activation='softmax', name='fc')(x)

    # Create model
    model = models.Model(img_input, x, name='densenet3D')
    return model
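A hedged example of calling the constructor above; it assumes dense_block3D and transition_block3D are defined elsewhere in the same module, the block counts simply mirror DenseNet-121, and the input shape is illustrative:

model = DenseNet3D(blocks=[6, 12, 24, 16],
                   input_shape=(64, 64, 64, 1),
                   classes=2)
model.summary()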
def SqueezeExcitation(in_block, ch, ratio=10):

    x = layers.GlobalAveragePooling3D()(in_block)
    x = layers.Dense(ch//ratio, activation='relu')(x)
    x = layers.Dense(ch, activation='sigmoid')(x)

    return layers.Multiply()([in_block, x])
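A small self-contained sketch showing how the squeeze-and-excitation block above can be wrapped around a 3D feature map (shapes and the classification head are illustrative):

from tensorflow import keras
from tensorflow.keras import layers

inp = layers.Input(shape=(32, 32, 32, 1))
feat = layers.Conv3D(40, 3, padding='same', activation='relu')(inp)  # 40-channel feature map
feat = SqueezeExcitation(feat, ch=40, ratio=10)                      # channel-wise recalibration
out = layers.GlobalAveragePooling3D()(feat)
out = layers.Dense(1, activation='sigmoid')(out)
model = keras.Model(inp, out)
model.summary()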
Example #5
    def __init__(self, shape):
        self.re_rate = 0.9
        self.inputs = layers.Input(shape=shape)

        self.f_block = layers.Conv3D(4, (3, 3, 3),
                                     activation='relu',
                                     kernel_regularizer=regularizers.l2(
                                         self.re_rate),
                                     padding='same')(self.inputs)
        self.bn = layers.BatchNormalization()(self.f_block)
        self.mp1 = layers.MaxPooling3D((2, 2, 2))(self.bn)

        self.f_block1 = layers.Conv3D(8, (3, 3, 3),
                                      activation='relu',
                                      kernel_regularizer=regularizers.l2(
                                          self.re_rate),
                                      padding='same')(self.mp1)
        self.bn = layers.BatchNormalization()(self.f_block1)

        self.mp2 = layers.MaxPooling3D((2, 2, 2))(self.bn)

        self.f_block2 = layers.Conv3D(8, (3, 3, 3),
                                      activation='relu',
                                      kernel_regularizer=regularizers.l2(
                                          self.re_rate),
                                      padding='same')(self.mp2)
        self.f_block2 = layers.BatchNormalization()(self.f_block2)

        self.b_back2 = layers.Conv3D(8, (3, 3, 3),
                                     activation='relu',
                                     kernel_regularizer=regularizers.l2(
                                         self.re_rate),
                                     padding='same')(self.f_block2)
        self.b_back2 = layers.BatchNormalization()(self.b_back2)

        self.b_back2 = layers.Conv3D(
            8, (3, 3, 3),
            activation='relu',
            kernel_regularizer=regularizers.l2(self.re_rate),
            padding='same')(layers.UpSampling3D((2, 2, 2))(self.f_block2))
        self.b_back2 = layers.BatchNormalization()(self.b_back2)

        self.cat2 = layers.concatenate([self.f_block1, self.b_back2])
        self.bn = layers.BatchNormalization()(self.cat2)

        self.b_back1 = layers.Conv3D(
            8, (3, 3, 3),
            activation='relu',
            kernel_regularizer=regularizers.l2(self.re_rate),
            padding='same')(layers.UpSampling3D((2, 2, 2))(self.bn))
        self.b_back1 = layers.BatchNormalization()(self.b_back1)

        self.gb = layers.GlobalAveragePooling3D()(self.b_back1)
        self.drop = layers.Dropout(rate=0.9)(self.gb)

        self.dense = layers.Dense(1, activation='sigmoid')(self.drop)
        self.model = keras.Model(inputs=[self.inputs], outputs=self.dense)
def AffineBlock(out, affine_regularisation, affine_trainable):

    # Affine
    affine = layers.GlobalAveragePooling3D()(out)

    affine = layers.Dense(12,
                          kernel_initializer='zeros',
                          bias_initializer='zeros',
                          trainable=affine_trainable,
                          activity_regularizer=DefReg(alpha=affine_regularisation,
                                                      value=0))(affine)

    affine = layers.Activation('linear')(affine)

    return affine
Example #7
    def __init__(self, shape):
        self.re_rate = 0.9
        dr = 0.9
        self.inputs = layers.Input(shape=shape)

        self.f_block = layers.Conv3D(4, (3, 3, 3), activation='relu',
                                     kernel_regularizer=regularizers.l2(self.re_rate),
                                     padding='same')(self.inputs)
        self.mp1 = layers.MaxPooling3D((2, 2, 2))(self.f_block)
        self.bn1 = layers.BatchNormalization()(self.mp1)

        self.f_block1 = layers.Conv3D(16, (3, 3, 3), activation='relu',
                                      kernel_regularizer=regularizers.l2(self.re_rate),
                                      padding='same')(self.bn1)
        self.mp2 = layers.MaxPooling3D((2, 2, 2))(self.f_block1)
        self.bn2 = layers.BatchNormalization()(self.mp2)

        self.f_block2 = layers.Conv3D(32, (3, 3, 3), activation='relu',
                                      kernel_regularizer=regularizers.l2(self.re_rate),
                                      padding='same')(self.bn2)
        self.mp3 = layers.MaxPooling3D((2, 2, 2))(self.f_block2)
        self.bn3 = layers.BatchNormalization()(self.mp3)

        self.f_block3 = layers.Conv3D(64, (3, 3, 3), activation='relu',
                                      kernel_regularizer=regularizers.l2(self.re_rate),
                                      padding='same')(self.bn3)
        self.f_block3 = layers.BatchNormalization()(self.f_block3)

        self.b_back3 = layers.Conv3D(128, (3, 3, 3), activation='relu',
                                     kernel_regularizer=regularizers.l2(self.re_rate),
                                     padding='same')(self.f_block3)
        self.b_back3 = layers.BatchNormalization()(self.b_back3)

        self.b_back2 = layers.Conv3D(64, (3, 3, 3), activation='relu',
                                     kernel_regularizer=regularizers.l2(self.re_rate),
                                     padding='same')(layers.UpSampling3D((2, 2, 2))(self.b_back3))
        self.b_back2 = layers.BatchNormalization()(self.b_back2)

        self.b_back1 = layers.Conv3D(32, (3, 3, 3), activation='relu',
                                     kernel_regularizer=regularizers.l2(self.re_rate),
                                     padding='same')(layers.UpSampling3D((2, 2, 2))(self.b_back2))
        self.b_back1 = layers.BatchNormalization()(self.b_back1)

        self.gb = layers.GlobalAveragePooling3D()(self.b_back1)
        self.dr = layers.Dropout(rate=dr)(self.gb)
        self.dense = layers.Dense(1, activation='sigmoid')(self.dr)

        self.model = keras.Model(inputs=self.inputs, outputs=self.dense)
Example #8
def get_resnet50_3D(show=False):
    if backend.image_data_format() == 'channels_last':
        bn_axis = 4
    else:
        bn_axis = 1
    inputs = layers.Input((None, None, None, 1))
    x = layers.ZeroPadding3D(padding=(3, 3, 3), name='conv1_pad')(inputs)
    x = layers.Conv3D(64, (7, 7, 7),
                      strides=(2, 2, 2),
                      padding='valid',
                      kernel_initializer='he_normal',
                      name='conv1')(x)
    x = layers.BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = layers.Activation('relu')(x)
    x = layers.MaxPooling3D((3, 3, 3), strides=(2, 2, 2))(x)

    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
    x = layers.GlobalAveragePooling3D()(x)
    x = layers.Dense(1024)(x)
    x = layers.Activation('relu')(x)
    x = layers.Dropout(0.2)(x)
    x = layers.Dense(1, name='fc')(x)
    x = layers.Activation(activation='sigmoid')(x)
    model = models.Model(inputs, x, name='resnet50')
    if show:
        model.summary()
        plot_model(model, 'resnet50_3D.pdf', True)
        model.save('resnet50_3D.h5')
    return model
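A hedged usage sketch; it assumes the conv_block and identity_block helpers (the standard ResNet building blocks, adapted to 3D) are defined in the same module:

model = get_resnet50_3D(show=False)
model.compile(optimizer='adam',
              loss='binary_crossentropy',   # single sigmoid output
              metrics=['accuracy'])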
Example #9
    def single_channel(self, input):
        conv1 = layers.Conv3D(4, (5, 6, 5), activation='relu',
                              kernel_regularizer=regularizers.l2(self.re_rate))(input)
        conv2 = layers.Conv3D(8, (3, 3, 3), activation='relu',
                              kernel_regularizer=regularizers.l2(self.re_rate))(conv1)
        mp_1 = layers.MaxPooling3D((2, 2, 2))(conv2)

        conv3 = layers.Conv3D(16, (3, 3, 3), activation='relu',
                              kernel_regularizer=regularizers.l2(self.re_rate))(mp_1)
        conv4 = layers.Conv3D(16, (3, 3, 3), activation='relu',
                              kernel_regularizer=regularizers.l2(self.re_rate))(conv3)

        mp_2 = layers.MaxPooling3D((2, 2, 2))(conv4)

        conv5 = layers.Conv3D(16, (3, 1, 3), activation='relu',
                              kernel_regularizer=regularizers.l2(self.re_rate))(mp_2)
        conv6 = layers.Conv3D(16, (3, 1, 3), activation='relu',
                              kernel_regularizer=regularizers.l2(self.re_rate))(conv5)
        gb = layers.GlobalAveragePooling3D()(conv6)
        dr = layers.Dropout(rate=self.dr)(gb)
        return dr
Example #10
    def __init__(self, shape):
        self.re_rate = 0.9
        dr = 0.9
        self.inputs = layers.Input(shape=shape)
        self.block1 = layers.Conv3D(4, (3, 3, 3),
                                    activation='relu',
                                    kernel_regularizer=regularizers.l2(
                                        self.re_rate),
                                    padding='same')(self.inputs)
        self.mp1 = layers.MaxPooling3D((2, 2, 2))(self.block1)
        self.bn1 = layers.BatchNormalization()(self.mp1)

        self.block2 = layers.Conv3D(32, (3, 3, 3),
                                    activation='relu',
                                    kernel_regularizer=regularizers.l2(
                                        self.re_rate),
                                    padding='same')(self.bn1)
        self.bn2 = layers.BatchNormalization()(self.block2)

        self.block3 = layers.Conv3D(64, (3, 3, 3),
                                    activation='relu',
                                    kernel_regularizer=regularizers.l2(
                                        self.re_rate),
                                    padding='same')(self.bn2)
        self.bn3 = layers.BatchNormalization()(self.block3)

        self.block4 = layers.Conv3D(32, (3, 3, 3),
                                    activation='relu',
                                    kernel_regularizer=regularizers.l2(
                                        self.re_rate),
                                    padding='same')(self.bn3)
        self.bn4 = layers.BatchNormalization()(self.block4)

        self.add1 = layers.add([self.bn2, self.bn4])
        self.mp2 = layers.MaxPooling3D((2, 2, 2))(self.add1)

        self.block5 = layers.Conv3D(64, (3, 3, 3),
                                    activation='relu',
                                    kernel_regularizer=regularizers.l2(
                                        self.re_rate),
                                    padding='same')(self.mp2)
        self.bn5 = layers.BatchNormalization()(self.block5)

        self.block6 = layers.Conv3D(128, (3, 3, 3),
                                    activation='relu',
                                    kernel_regularizer=regularizers.l2(
                                        self.re_rate),
                                    padding='same')(self.bn5)
        self.bn6 = layers.BatchNormalization()(self.block6)

        self.block7 = layers.Conv3D(64, (3, 3, 3),
                                    activation='relu',
                                    kernel_regularizer=regularizers.l2(
                                        self.re_rate),
                                    padding='same')(self.bn6)
        self.bn7 = layers.BatchNormalization()(self.block7)

        self.add2 = layers.add([self.bn5, self.bn7])

        self.gb = layers.GlobalAveragePooling3D()(self.add2)
        self.dr = layers.Dropout(rate=dr)(self.gb)
        self.dense = layers.Dense(1, activation='sigmoid')(self.dr)

        self.model = keras.Model(inputs=self.inputs, outputs=self.dense)
Example #11
def get_model(x, y, z):
    rate = 0.3
    dropout_rate = 0.2
    model = models.Sequential()
    model.add(
        layers.Conv3D(4, (3, 3, 3),
                      activation='relu',
                      input_shape=(x, y, z, 1)))
    model.add(
        layers.Conv3D(8, (3, 3, 3),
                      activation='relu',
                      kernel_regularizer=regularizers.l2(rate)))
    model.add(
        layers.Conv3D(16, (3, 3, 3),
                      activation='relu',
                      kernel_regularizer=regularizers.l2(rate)))
    model.add(layers.BatchNormalization())
    model.add(
        layers.Conv3D(32, (3, 3, 3),
                      activation='relu',
                      kernel_regularizer=regularizers.l2(rate)))
    model.add(layers.BatchNormalization())
    model.add(
        layers.Conv3D(32, (3, 3, 3),
                      activation='relu',
                      kernel_regularizer=regularizers.l2(rate)))
    model.add(layers.MaxPooling3D((2, 2, 2)))
    model.add(layers.BatchNormalization())
    model.add(
        layers.Conv3D(32, (3, 3, 3),
                      activation='relu',
                      kernel_regularizer=regularizers.l2(rate)))
    model.add(layers.BatchNormalization())
    model.add(
        layers.Conv3D(32, (3, 3, 3),
                      activation='relu',
                      kernel_regularizer=regularizers.l2(rate)))
    model.add(layers.MaxPooling3D((2, 2, 2)))
    model.add(layers.BatchNormalization())
    model.add(
        layers.Conv3D(64, (3, 3, 3),
                      activation='relu',
                      kernel_regularizer=regularizers.l2(rate)))
    model.add(layers.BatchNormalization())
    model.add(
        layers.Conv3D(64, (3, 3, 3),
                      activation='relu',
                      kernel_regularizer=regularizers.l2(rate)))
    model.add(layers.BatchNormalization())
    model.add(
        layers.Conv3D(64, (3, 3, 3),
                      activation='relu',
                      kernel_regularizer=regularizers.l2(rate)))
    model.add(layers.MaxPooling3D((2, 2, 2)))
    model.add(layers.BatchNormalization())
    model.add(
        layers.Conv3D(64, (3, 3, 3),
                      activation='relu',
                      kernel_regularizer=regularizers.l2(rate)))
    model.add(layers.BatchNormalization())
    model.add(
        layers.Conv3D(64, (3, 3, 3),
                      activation='relu',
                      kernel_regularizer=regularizers.l2(rate)))
    model.add(layers.BatchNormalization())
    model.add(layers.GlobalAveragePooling3D())
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dropout(dropout_rate))
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dropout(dropout_rate))
    model.add(layers.Dense(32, activation='relu'))
    model.add(layers.Dropout(dropout_rate))
    model.add(layers.Dense(16, activation='relu'))
    model.add(layers.Dense(8, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))
    # plot_model(model, "./img/model.png", True)
    model.summary()
    return model
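Because this builder is a plain Sequential model with a single sigmoid output, a minimal (illustrative) training setup could look like the following; the volume dimensions are placeholders and must be large enough to survive the stack of valid-padding convolutions and the three pooling stages (at least about 82 voxels per side):

model = get_model(96, 96, 96)
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
# model.fit(train_volumes, train_labels, batch_size=4, epochs=10,
#           validation_data=(val_volumes, val_labels))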
Example #12
    def __init__(self, shape):
        """
        更改临床数据的传入位置
        在主管上,我们给出评级:sex,apoe4对于脑部区域有着明显的影响,并且与MRI影响有着强相关关系,应该在临床数据使用的前面
        age,marriage,edu:理论上影响不是很大,是社会对于精神的影响可以放在第二层次
        mmse:对最终结果有着明显的指向
        :param shape:
        """
        self.re_rate = 0.6
        self.inputs = layers.Input(shape=shape)

        self.f_block = layers.Conv3D(4, (3, 3, 3),
                                     activation='relu',
                                     kernel_regularizer=regularizers.l2(
                                         self.re_rate),
                                     padding='same')(self.inputs)
        self.bn = layers.BatchNormalization()(self.f_block)
        self.mp1 = layers.MaxPooling3D((2, 2, 2))(self.bn)

        self.f_block1 = layers.Conv3D(8, (3, 3, 3),
                                      activation='relu',
                                      kernel_regularizer=regularizers.l2(
                                          self.re_rate),
                                      padding='same')(self.mp1)
        self.bn = layers.BatchNormalization()(self.f_block1)

        self.mp2 = layers.MaxPooling3D((2, 2, 2))(self.bn)

        self.f_block2 = layers.Conv3D(16, (3, 3, 3),
                                      activation='relu',
                                      kernel_regularizer=regularizers.l2(
                                          self.re_rate),
                                      padding='same')(self.mp2)
        self.f_block2 = layers.BatchNormalization()(self.f_block2)

        self.b_back2 = layers.Conv3D(32, (3, 3, 3),
                                     activation='relu',
                                     kernel_regularizer=regularizers.l2(
                                         self.re_rate),
                                     padding='same')(self.f_block2)
        self.b_back2 = layers.BatchNormalization()(self.b_back2)

        self.b_back2 = layers.Conv3D(
            32, (3, 3, 3),
            activation='relu',
            kernel_regularizer=regularizers.l2(self.re_rate),
            padding='same')(layers.UpSampling3D((2, 2, 2))(self.f_block2))
        self.b_back2 = layers.BatchNormalization()(self.b_back2)

        self.cat2 = layers.concatenate([self.f_block1, self.b_back2])
        self.bn = layers.BatchNormalization()(self.cat2)

        self.b_back1 = layers.Conv3D(
            32, (3, 3, 3),
            activation='relu',
            kernel_regularizer=regularizers.l2(self.re_rate),
            padding='same')(layers.UpSampling3D((2, 2, 2))(self.bn))
        self.b_back1 = layers.BatchNormalization()(self.b_back1)

        self.gb = layers.GlobalAveragePooling3D()(self.b_back1)
        self.drop = layers.Dropout(rate=0.9)(self.gb)

        # add apoe4
        apoe4_input = layers.Input(shape=(1, ), dtype='int32')
        embedded_layer = layers.Embedding(shape[-1], 1)(apoe4_input)
        emCon = layers.Flatten()(embedded_layer)
        self.drop = layers.concatenate([self.drop, emCon])

        # add sex
        sex_input = layers.Input(shape=(1, ), dtype='int32')
        embedded_layer = layers.Embedding(shape[-1], 1)(sex_input)
        emCon = layers.Flatten()(embedded_layer)
        self.dense = layers.concatenate([self.drop, emCon])

        self.dense = layers.Dense(32, activation='relu')(self.dense)

        # add age
        age_input = layers.Input(shape=(1, ), dtype='int32')
        embedded_layer = layers.Embedding(shape[-1], 1)(age_input)
        emCon = layers.Flatten()(embedded_layer)
        self.dense = layers.concatenate([self.dense, emCon])

        # add marriage
        marriage_input = layers.Input(shape=(1, ), dtype='int32')
        embedded_layer = layers.Embedding(shape[-1], 1)(marriage_input)
        emCon = layers.Flatten()(embedded_layer)
        self.dense = layers.concatenate([self.dense, emCon])

        # add education
        edu_input = layers.Input(shape=(1, ), dtype='int32')
        embedded_layer = layers.Embedding(shape[-1], 1)(edu_input)
        emCon = layers.Flatten()(embedded_layer)
        self.dense = layers.concatenate([self.dense, emCon])

        self.dense = layers.Dense(16, activation='relu')(self.dense)

        # add mmse
        mmse_input = layers.Input(shape=(1, ), dtype='int32')
        embedded_layer = layers.Embedding(shape[-1], 1)(mmse_input)
        emCon = layers.Flatten()(embedded_layer)
        self.dense = layers.concatenate([self.dense, emCon])

        self.dense = layers.Dense(1, activation='sigmoid')(self.dense)
        self.model = keras.Model(inputs=[
            self.inputs, mmse_input, sex_input, age_input, marriage_input,
            apoe4_input, edu_input
        ],
                                 outputs=self.dense)
Example #13
    def __init__(self, shape):
        """
        将dropout输出的结果直接影像最终结果
        :param shape:
        """
        self.re_rate = 0.6
        self.inputs = layers.Input(shape=shape)

        self.f_block = layers.Conv3D(4, (3, 3, 3),
                                     activation='relu',
                                     kernel_regularizer=regularizers.l2(
                                         self.re_rate),
                                     padding='same')(self.inputs)
        self.bn = layers.BatchNormalization()(self.f_block)
        self.mp1 = layers.MaxPooling3D((2, 2, 2))(self.bn)

        self.f_block1 = layers.Conv3D(8, (3, 3, 3),
                                      activation='relu',
                                      kernel_regularizer=regularizers.l2(
                                          self.re_rate),
                                      padding='same')(self.mp1)
        self.bn = layers.BatchNormalization()(self.f_block1)

        self.mp2 = layers.MaxPooling3D((2, 2, 2))(self.bn)

        self.f_block2 = layers.Conv3D(16, (3, 3, 3),
                                      activation='relu',
                                      kernel_regularizer=regularizers.l2(
                                          self.re_rate),
                                      padding='same')(self.mp2)
        self.f_block2 = layers.BatchNormalization()(self.f_block2)

        self.b_back2 = layers.Conv3D(32, (3, 3, 3),
                                     activation='relu',
                                     kernel_regularizer=regularizers.l2(
                                         self.re_rate),
                                     padding='same')(self.f_block2)
        self.b_back2 = layers.BatchNormalization()(self.b_back2)

        self.b_back2 = layers.Conv3D(
            64, (3, 3, 3),
            activation='relu',
            kernel_regularizer=regularizers.l2(self.re_rate),
            padding='same')(layers.UpSampling3D((2, 2, 2))(self.f_block2))
        self.b_back2 = layers.BatchNormalization()(self.b_back2)

        self.cat2 = layers.concatenate([self.f_block1, self.b_back2])
        self.bn = layers.BatchNormalization()(self.cat2)

        self.b_back1 = layers.Conv3D(
            32, (3, 3, 3),
            activation='relu',
            kernel_regularizer=regularizers.l2(self.re_rate),
            padding='same')(layers.UpSampling3D((2, 2, 2))(self.bn))
        self.b_back1 = layers.BatchNormalization()(self.b_back1)

        self.gb = layers.GlobalAveragePooling3D()(self.b_back1)
        self.gb_drop = layers.Dropout(rate=0.9)(self.gb)

        self.pure_dense = layers.Dense(1, activation='sigmoid')(self.gb_drop)

        # add mmse
        mmse_input = layers.Input(shape=(1, ), dtype='int32')
        embedded_layer = layers.Embedding(shape[-1], 1)(mmse_input)
        embedded_layer = layers.Conv1D(4, 1, activation='relu')(embedded_layer)
        emCon = layers.Flatten()(embedded_layer)
        self.drop = emCon

        # add sex
        sex_input = layers.Input(shape=(1, ), dtype='int32')
        embedded_layer = layers.Embedding(shape[-1], 1)(sex_input)
        embedded_layer = layers.Conv1D(4, 1, activation='relu')(embedded_layer)
        emCon = layers.Flatten()(embedded_layer)
        self.drop = layers.concatenate([self.drop, emCon])

        # add age
        age_input = layers.Input(shape=(1, ), dtype='int32')
        embedded_layer = layers.Embedding(shape[-1], 1)(age_input)
        embedded_layer = layers.Conv1D(4, 1, activation='relu')(embedded_layer)
        emCon = layers.Flatten()(embedded_layer)
        self.drop = layers.concatenate([self.drop, emCon])

        # add marriage
        marriage_input = layers.Input(shape=(1, ), dtype='int32')
        embedded_layer = layers.Embedding(shape[-1], 1)(marriage_input)
        embedded_layer = layers.Conv1D(4, 1, activation='relu')(embedded_layer)
        emCon = layers.Flatten()(embedded_layer)
        self.drop = layers.concatenate([self.drop, emCon])

        # add apoe4
        apoe4_input = layers.Input(shape=(1, ), dtype='int32')
        embedded_layer = layers.Embedding(shape[-1], 1)(apoe4_input)
        embedded_layer = layers.Conv1D(4, 1, activation='relu')(embedded_layer)
        emCon = layers.Flatten()(embedded_layer)
        self.drop = layers.concatenate([self.drop, emCon])

        # add education
        edu_input = layers.Input(shape=(1, ), dtype='int32')
        embedded_layer = layers.Embedding(shape[-1], 1)(edu_input)
        embedded_layer = layers.Conv1D(4, 1, activation='relu')(embedded_layer)
        emCon = layers.Flatten()(embedded_layer)
        self.drop = layers.concatenate([self.drop, emCon])

        self.drop = layers.concatenate([self.gb_drop, self.drop])

        self.dense = layers.Dense(1, activation='sigmoid')(self.drop)

        self.model = keras.Model(inputs=[
            self.inputs, mmse_input, sex_input, age_input, marriage_input,
            apoe4_input, edu_input
        ],
                                 outputs=[self.pure_dense, self.dense])
def MobileNet(input_shape=None,
              alpha=1.0,
              depth_multiplier=1,
              dropout=1e-3,
              include_top=True,
              weights='imagenet',
              input_tensor=None,
              pooling=None,
              classes=1000,
              **kwargs):
    """Instantiates the MobileNet architecture.

    # Arguments
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(224, 224, 3)` with `channels_last` data format
            or `(3, 224, 224)` with `channels_first` data format).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 32.
            E.g. `(200, 200, 3)` would be one valid value.
        alpha: controls the width of the network. This is known as the
            width multiplier in the MobileNet paper.
            - If `alpha` < 1.0, proportionally decreases the number
                of filters in each layer.
            - If `alpha` > 1.0, proportionally increases the number
                of filters in each layer.
            - If `alpha` = 1, default number of filters from the paper
                 are used at each layer.
        depth_multiplier: depth multiplier for depthwise convolution. This
            is called the resolution multiplier in the MobileNet paper.
        dropout: dropout rate
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization),
              'imagenet' (pre-training on ImageNet),
              or the path to the weights file to be loaded.
        input_tensor: optional Keras tensor (i.e. output of
            `layers.Input()`)
            to use as image input for the model.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model
                will be the 4D tensor output of the
                last convolutional block.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional block, and thus
                the output of the model will be a
                2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.

    # Returns
        A Keras model instance.

    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
        RuntimeError: If attempting to run this model with a
            backend that does not support separable convolutions.
    """
    global backend, layers, models, keras_utils
    backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)

    if not (weights in {'imagenet', None} or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `imagenet` '
                         '(pre-training on ImageNet), '
                         'or the path to the weights file to be loaded.')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError(
            'If using `weights` as `"imagenet"` with `include_top` '
            'as true, `classes` should be 1000')

    # Determine proper input shape and default size.
    # Note: `_obtain_input_shape` is not called here as in the 2D reference
    # implementation, so a concrete `input_shape` must be provided (rows and
    # cols are read from it below).
    if input_shape is None:
        default_size = 224
    else:
        if backend.image_data_format() == 'channels_first':
            rows = input_shape[1]
            cols = input_shape[2]
        else:
            rows = input_shape[0]
            cols = input_shape[1]

        if rows == cols and rows in [128, 160, 192, 224]:
            default_size = rows
        else:
            default_size = 224

    if backend.image_data_format() == 'channels_last':
        row_axis, col_axis = (0, 1)
    else:
        row_axis, col_axis = (1, 2)
    rows = input_shape[row_axis]
    cols = input_shape[col_axis]

    if weights == 'imagenet':
        if depth_multiplier != 1:
            raise ValueError('If imagenet weights are being loaded, '
                             'depth multiplier must be 1')

        if alpha not in [0.25, 0.50, 0.75, 1.0]:
            raise ValueError('If imagenet weights are being loaded, '
                             'alpha can be one of'
                             '`0.25`, `0.50`, `0.75` or `1.0` only.')

        if rows != cols or rows not in [128, 160, 192, 224]:
            rows = 224
            warnings.warn('`input_shape` is undefined or non-square, '
                          'or `rows` is not in [128, 160, 192, 224]. '
                          'Weights for input shape (224, 224) will be'
                          ' loaded as the default.')

    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    x = _conv_block(img_input, 32, alpha, strides=(2, 2, 2))
    x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1)

    x = _depthwise_conv_block(x,
                              128,
                              alpha,
                              depth_multiplier,
                              strides=(2, 2, 2),
                              block_id=2)
    x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3)

    x = _depthwise_conv_block(x,
                              256,
                              alpha,
                              depth_multiplier,
                              strides=(2, 2, 2),
                              block_id=4)
    x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5)

    x = _depthwise_conv_block(x,
                              512,
                              alpha,
                              depth_multiplier,
                              strides=(2, 2, 2),
                              block_id=6)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11)

    x = _depthwise_conv_block(x,
                              1024,
                              alpha,
                              depth_multiplier,
                              strides=(2, 2, 2),
                              block_id=12)
    x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13)

    if include_top:
        if backend.image_data_format() == 'channels_first':
            shape = (int(1024 * alpha), 1, 1, 1)
        else:
            shape = (1, 1, 1, int(1024 * alpha))

        x = layers.GlobalAveragePooling3D()(x)
        x = layers.Reshape(shape, name='reshape_1')(x)
        x = layers.Dropout(dropout, name='dropout')(x)
        x = layers.Conv3D(classes, (1, 1, 1),
                          padding='same',
                          name='conv_preds')(x)
        x = layers.Reshape((classes, ), name='reshape_2')(x)
        x = layers.Activation('softmax', name='act_softmax')(x)
    else:
        if pooling == 'avg':
            x = layers.GlobalAveragePooling3D()(x)
        elif pooling == 'max':
            x = layers.GlobalMaxPooling3D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = keras_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    model = models.Model(inputs, x, name='mobilenet_%0.2f_%s' % (alpha, rows))

    # Load weights.
    if weights == 'imagenet':
        if alpha == 1.0:
            alpha_text = '1_0'
        elif alpha == 0.75:
            alpha_text = '7_5'
        elif alpha == 0.50:
            alpha_text = '5_0'
        else:
            alpha_text = '2_5'

        if include_top:
            model_name = 'mobilenet_%s_%d_tf.h5' % (alpha_text, rows)
            weight_path = BASE_WEIGHT_PATH + model_name
            weights_path = keras_utils.get_file(model_name,
                                                weight_path,
                                                cache_subdir='models')
        else:
            model_name = 'mobilenet_%s_%d_tf_no_top.h5' % (alpha_text, rows)
            weight_path = BASE_WEIGHT_PATH + model_name
            weights_path = keras_utils.get_file(model_name,
                                                weight_path,
                                                cache_subdir='models')
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)

    return model
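A hedged instantiation of the 3D MobileNet above; it assumes the _conv_block and _depthwise_conv_block 3D helpers exist in the same module, and it passes weights=None because the checkpoints referenced by BASE_WEIGHT_PATH are 2D ImageNet weights that would not match a 3D graph:

model = MobileNet(input_shape=(64, 64, 64, 3),
                  alpha=1.0,
                  include_top=True,
                  weights=None,
                  classes=10)
model.summary()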
Example #15
    def __init__(self, shape):
        self.re_rate = 0.9
        self.inputs = layers.Input(shape=shape)

        self.f_block = layers.Conv3D(4, (3, 3, 3),
                                     activation='relu',
                                     kernel_regularizer=regularizers.l2(
                                         self.re_rate),
                                     padding='same')(self.inputs)
        self.bn = layers.BatchNormalization()(self.f_block)
        self.mp1 = layers.MaxPooling3D((2, 2, 2))(self.bn)

        self.f_block1 = layers.Conv3D(16, (3, 3, 3),
                                      activation='relu',
                                      kernel_regularizer=regularizers.l2(
                                          self.re_rate),
                                      padding='same')(self.mp1)
        self.bn = layers.BatchNormalization()(self.f_block1)
        self.f_block1 = layers.Conv3D(16, (1, 1, 1),
                                      activation='relu',
                                      kernel_regularizer=regularizers.l2(
                                          self.re_rate),
                                      padding='same')(self.bn)
        self.f_block1 = layers.BatchNormalization()(self.f_block1)
        self.mp2 = layers.MaxPooling3D((2, 2, 2))(self.f_block1)

        self.f_block2 = layers.Conv3D(32, (3, 3, 3),
                                      activation='relu',
                                      kernel_regularizer=regularizers.l2(
                                          self.re_rate),
                                      padding='same')(self.mp2)
        self.f_block2 = layers.BatchNormalization()(self.f_block2)
        self.f_block2 = layers.Conv3D(32, (1, 1, 1),
                                      activation='relu',
                                      kernel_regularizer=regularizers.l2(
                                          self.re_rate),
                                      padding='same')(self.f_block2)
        self.f_block2 = layers.BatchNormalization()(self.f_block2)

        self.mp3 = layers.MaxPooling3D((2, 2, 2))(self.f_block2)

        self.f_block3 = layers.Conv3D(64, (3, 3, 3),
                                      activation='relu',
                                      kernel_regularizer=regularizers.l2(
                                          self.re_rate),
                                      padding='same')(self.mp3)
        self.f_block3 = layers.BatchNormalization()(self.f_block3)
        self.f_block3 = layers.Conv3D(64, (1, 1, 1),
                                      activation='relu',
                                      kernel_regularizer=regularizers.l2(
                                          self.re_rate),
                                      padding='same')(self.f_block3)
        self.f_block3 = layers.BatchNormalization()(self.f_block3)
        # self.mp4 = layers.MaxPooling3D((2, 2, 2))(self.f_block3)

        self.b_back3 = layers.Conv3D(64, (3, 3, 3),
                                     activation='relu',
                                     kernel_regularizer=regularizers.l2(
                                         self.re_rate),
                                     padding='same')(self.f_block3)
        self.b_back3 = layers.BatchNormalization()(self.b_back3)
        self.b_back3 = layers.Conv3D(64, (1, 1, 1),
                                     activation='relu',
                                     kernel_regularizer=regularizers.l2(
                                         self.re_rate),
                                     padding='same')(self.b_back3)
        self.b_back3 = layers.BatchNormalization()(self.b_back3)

        self.cat1 = layers.concatenate([self.f_block3, self.b_back3])
        self.bn4 = layers.BatchNormalization()(self.cat1)

        self.b_back2 = layers.Conv3D(
            64, (3, 3, 3),
            activation='relu',
            kernel_regularizer=regularizers.l2(self.re_rate),
            padding='same')(layers.UpSampling3D((2, 2, 2))(self.bn4))
        self.b_back2 = layers.BatchNormalization()(self.b_back2)
        self.b_back2 = layers.Conv3D(64, (1, 1, 1),
                                     activation='relu',
                                     kernel_regularizer=regularizers.l2(
                                         self.re_rate),
                                     padding='same')(self.b_back2)
        self.b_back2 = layers.BatchNormalization()(self.b_back2)
        self.cat2 = layers.concatenate([self.mp2, self.b_back2])
        self.bn = layers.BatchNormalization()(self.cat2)

        self.b_back1 = layers.Conv3D(
            32, (3, 3, 3),
            activation='relu',
            kernel_regularizer=regularizers.l2(self.re_rate),
            padding='same')(layers.UpSampling3D((2, 2, 2))(self.bn))
        self.b_back1 = layers.BatchNormalization()(self.b_back1)
        self.b_back1 = layers.Conv3D(32, (1, 1, 1),
                                     activation='relu',
                                     kernel_regularizer=regularizers.l2(
                                         self.re_rate),
                                     padding='same')(self.b_back1)
        self.b_back1 = layers.BatchNormalization()(self.b_back1)

        self.cat3 = layers.concatenate([self.mp1, self.b_back1])

        self.gb = layers.GlobalAveragePooling3D()(self.cat3)
        self.dense3 = layers.Dense(1, activation='sigmoid')(self.gb)

        self.model = keras.Model(inputs=self.inputs, outputs=self.dense3)
def InceptionResNetV2R2(include_top=True,
                        weights='imagenet',
                        input_tensor=None,
                        input_shape=None,
                        pooling=None,
                        classes=1000,
                        **kwargs):
    """Instantiates the Inception-ResNet v2 architecture.

    Optionally loads weights pre-trained on ImageNet.
    Note that the data format convention used by the model is
    the one specified in your Keras config at `~/.keras/keras.json`.

    # Arguments
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization),
              'imagenet' (pre-training on ImageNet),
              or the path to the weights file to be loaded.
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is `False` (otherwise the input shape
            has to be `(299, 299, 3)` with `'channels_last'` data format
            or `(3, 299, 299)` with `'channels_first'` data format).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 75.
            E.g. `(150, 150, 3)` would be one valid value.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the last convolutional block.
            - `'avg'` means that global average pooling
                will be applied to the output of the
                last convolutional block, and thus
                the output of the model will be a 2D tensor.
            - `'max'` means that global max pooling will be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is `True`, and
            if no `weights` argument is specified.

    # Returns
        A Keras `Model` instance.

    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
    """
    global backend, layers, models, keras_utils
    #    backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)
    from keras import backend, layers, models
    from keras import utils as keras_utils

    if not (weights in {'imagenet', None} or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `imagenet` '
                         '(pre-training on ImageNet), '
                         'or the path to the weights file to be loaded.')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError(
            'If using `weights` as `"imagenet"` with `include_top`'
            ' as true, `classes` should be 1000')

    # Determine proper input shape. The generic `_obtain_input_shape` helper
    # used by the 2D implementation is bypassed here; a concrete 3D shape
    # such as (96, 120, 86, 2) is expected to be passed in directly.

    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    # Stem block output: 21 x 27 x 19 x 256
    x = conv3d_bn(img_input, 48, 3, padding='valid')
    x = conv3d_bn(x, 64, 3)
    x1 = layers.MaxPooling3D(3, strides=2)(x)
    x2 = conv3d_bn(x, 64, 3, 2, padding='valid')
    channel_axis = 1 if backend.image_data_format() == 'channels_first' else 4
    x = layers.Concatenate(axis=channel_axis)([x1, x2])  #nKernal = 128
    x1 = conv3d_bn(x, 64, 1)
    x1 = conv3d_bn(x1, 96, 3, padding='valid')
    x2 = conv3d_bn(x, 64, 1)
    x2 = conv3d_bn(x2, 64, [1, 7, 1])
    x2 = conv3d_bn(x2, 64, [1, 1, 7])
    x2 = conv3d_bn(x2, 64, [7, 1, 1])
    x2 = conv3d_bn(x2, 96, 3, padding='valid')
    x = layers.Concatenate(axis=channel_axis)([x1, x2])  #nKernal = 192
    x1 = conv3d_bn(x, 128, 3, 2, padding='valid')
    x2 = layers.MaxPooling3D(3, strides=2, padding='valid', name='StemEnd')(x)
    x = layers.Concatenate(axis=channel_axis)([x1, x2])  #nKernal = 320

    # 2x block35 (Inception-ResNet-A block) output: 21 x 27 x 19 x 320
    for block_idx in range(1, 3):
        x = inception_resnet_block(
            x,
            scale=0.17,
            #                                   scale=0.1, # reduce to 0.1 to avoid instability
            block_type='block35',
            block_idx=block_idx)

    # Mixed 6a (Reduction-A block) output: 10 x 13 x 9 x 640
    branch_0 = conv3d_bn(x, 160, 3, strides=2, padding='valid')
    branch_1 = conv3d_bn(x, 128, 1)
    branch_1 = conv3d_bn(branch_1, 128, 3)
    branch_1 = conv3d_bn(branch_1, 160, 3, strides=2, padding='valid')
    branch_pool = layers.MaxPooling3D(3, strides=2, padding='valid')(x)
    branches = [branch_0, branch_1, branch_pool]
    channel_axis = 1 if backend.image_data_format() == 'channels_first' else 4
    x = layers.Concatenate(axis=channel_axis, name='mixed_6a')(branches)

    # 4x block17 (Inception-ResNet-B block) output: 10 x 13 x 9 x 640
    for block_idx in range(1, 5):
        x = inception_resnet_block(x,
                                   scale=0.1,
                                   block_type='block17',
                                   block_idx=block_idx)

    # Mixed 7a (Reduction-B block): 4 x 6 x 4 x 1408
    branch_0 = conv3d_bn(x, 192, 1)
    branch_0 = conv3d_bn(branch_0, 224, 3, strides=2, padding='valid')
    branch_1 = conv3d_bn(x, 192, 1)
    branch_1 = conv3d_bn(branch_1, 288, 3, strides=2, padding='valid')
    branch_2 = conv3d_bn(x, 192, 1)
    branch_2 = conv3d_bn(branch_2, 224, 3)
    branch_2 = conv3d_bn(branch_2, 256, 3, strides=2, padding='valid')
    branch_pool = layers.MaxPooling3D(3, strides=2, padding='valid')(x)
    branches = [branch_0, branch_1, branch_2, branch_pool]
    x = layers.Concatenate(axis=channel_axis, name='mixed_7a')(branches)

    # 2x block8 (Inception-ResNet-C block): 4 x 6 x 4 x 1408
    for block_idx in range(1, 2):
        x = inception_resnet_block(
            x,
            scale=0.2,
            #                                   scale=0.1, # reduce to 0.1 to avoid instability
            block_type='block8',
            block_idx=block_idx)
    x = inception_resnet_block(x,
                               scale=1.,
                               activation=None,
                               block_type='block8',
                               block_idx=5)

    # Final convolution block: 4 x 6 x 4 x 512
    x = conv3d_bn(x, 512, 1, name='conv_7b')

    if include_top:
        # Classification block
        x = layers.GlobalAveragePooling3D(name='avg_pool')(x)
        x = layers.Dense(classes, activation='softmax', name='predictions')(x)
    else:
        if pooling == 'avg':
            x = layers.GlobalAveragePooling3D()(x)
        elif pooling == 'max':
            x = layers.GlobalMaxPooling3D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = keras_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    model = models.Model(inputs, x, name='inception_resnet_v2_3D')

    # Load weights.
    if weights == 'imagenet':
        if include_top:
            fname = 'inception_resnet_v2_weights_tf_dim_ordering_tf_kernels.h5'
            weights_path = keras_utils.get_file(
                fname,
                BASE_WEIGHT_URL + fname,
                cache_subdir='models',
                file_hash='e693bd0210a403b3192acc6073ad2e96')
        else:
            fname = ('inception_resnet_v2_weights_'
                     'tf_dim_ordering_tf_kernels_notop.h5')
            weights_path = keras_utils.get_file(
                fname,
                BASE_WEIGHT_URL + fname,
                cache_subdir='models',
                file_hash='d19885ff4a710c122648d3b5c3b684e4')
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)

    return model
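A hedged call to the 3D Inception-ResNet builder above; it assumes the conv3d_bn and inception_resnet_block 3D helpers are available in the same module and uses weights=None, since the files listed above are 2D ImageNet checkpoints. The input shape reuses the one noted in the source comment:

model = InceptionResNetV2R2(include_top=True,
                            weights=None,
                            input_shape=(96, 120, 86, 2),
                            classes=2)
model.summary()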
Example #17
    def __init__(self, shape):
        self.re_rate = 0.9
        self.input = layers.Input(shape=shape)

        self.modle = layers.Conv3D(32, (3, 3, 3), padding='same')(self.input)
        self.modle = layers.BatchNormalization()(self.modle)
        self.modle = layers.ReLU()(self.modle)

        self.modle = layers.Conv3D(32, (3, 3, 3), padding='same')(self.modle)
        self.modle = layers.BatchNormalization()(self.modle)
        self.modle = layers.ReLU()(self.modle)

        self.modle = layers.Conv3D(64, (3, 3, 3), padding='same',
                                   strides=2)(self.modle)

        self.tmp = layers.BatchNormalization()(self.modle)
        self.tmp = layers.ReLU()(self.tmp)
        self.tmp = layers.Conv3D(64, (3, 3, 3), padding='same')(self.tmp)
        self.tmp = layers.BatchNormalization()(self.tmp)
        self.tmp = layers.ReLU()(self.tmp)
        self.tmp = layers.Conv3D(64, (3, 3, 3), padding='same')(self.tmp)

        self.modle = layers.add([self.modle, self.tmp])

        self.tmp = layers.BatchNormalization()(self.modle)
        self.tmp = layers.ReLU()(self.tmp)
        self.tmp = layers.Conv3D(64, (3, 3, 3), padding='same')(self.tmp)
        self.tmp = layers.BatchNormalization()(self.tmp)
        self.tmp = layers.ReLU()(self.tmp)
        self.tmp = layers.Conv3D(64, (3, 3, 3), padding='same')(self.tmp)

        self.modle = layers.add([self.modle, self.tmp])

        self.modle = layers.BatchNormalization()(self.modle)
        self.modle = layers.ReLU()(self.modle)
        self.modle = layers.Conv3D(64, (3, 3, 3), padding='same',
                                   strides=2)(self.modle)

        self.tmp = layers.BatchNormalization()(self.modle)
        self.tmp = layers.ReLU()(self.tmp)
        self.tmp = layers.Conv3D(64, (3, 3, 3), padding='same')(self.tmp)
        self.tmp = layers.BatchNormalization()(self.tmp)
        self.tmp = layers.ReLU()(self.tmp)
        self.tmp = layers.Conv3D(64, (3, 3, 3), padding='same')(self.tmp)

        self.modle = layers.add([self.modle, self.tmp])

        self.tmp = layers.BatchNormalization()(self.modle)
        self.tmp = layers.ReLU()(self.tmp)
        self.tmp = layers.Conv3D(64, (3, 3, 3), padding='same')(self.tmp)
        self.tmp = layers.BatchNormalization()(self.tmp)
        self.tmp = layers.ReLU()(self.tmp)
        self.tmp = layers.Conv3D(64, (3, 3, 3), padding='same')(self.tmp)

        self.modle = layers.add([self.modle, self.tmp])

        self.modle = layers.BatchNormalization()(self.modle)
        self.modle = layers.ReLU()(self.modle)
        self.modle = layers.Conv3D(128, (3, 3, 3), padding='same',
                                   strides=2)(self.modle)

        self.tmp = layers.BatchNormalization()(self.modle)
        self.tmp = layers.ReLU()(self.tmp)
        self.tmp = layers.Conv3D(128, (3, 3, 3), padding='same')(self.tmp)
        self.tmp = layers.BatchNormalization()(self.tmp)
        self.tmp = layers.ReLU()(self.tmp)
        self.tmp = layers.Conv3D(128, (3, 3, 3), padding='same')(self.tmp)

        self.modle = layers.add([self.modle, self.tmp])

        self.tmp = layers.BatchNormalization()(self.modle)
        self.tmp = layers.ReLU()(self.tmp)
        self.tmp = layers.Conv3D(128, (3, 3, 3), padding='same')(self.tmp)
        self.tmp = layers.BatchNormalization()(self.tmp)
        self.tmp = layers.ReLU()(self.tmp)
        self.tmp = layers.Conv3D(128, (3, 3, 3), padding='same')(self.tmp)

        self.modle = layers.add([self.modle, self.tmp])

        self.modle = layers.GlobalAveragePooling3D()(self.modle)
        self.modle = layers.Dense(4, activation='softmax')(self.modle)

        self.modle = keras.Model(inputs=self.input, outputs=self.modle)
Example #18
def residual_network(x):
    """
    ResNeXt by default. For ResNet set `cardinality` = 1 above.
    
    """
    def add_common_layers(y):
        y = layers.BatchNormalization()(y)
        y = layers.LeakyReLU()(y)
        y = layers.Dropout(dropout_level)(y)
        return y

    def grouped_convolution(y, nb_channels, _strides):
        # when `cardinality` == 1 this is just a standard convolution
        if cardinality == 1:
            return layers.Conv3D(
                nb_channels,
                kernel_size=(3, 3, 3),
                strides=_strides,
                padding='same',
                kernel_regularizer=regularizers.l2(L2_regularizer))(y)

        assert not nb_channels % cardinality
        _d = nb_channels // cardinality

        # in a grouped convolution layer, input and output channels are divided into `cardinality` groups,
        # and convolutions are separately performed within each group
        groups = []
        for j in range(cardinality):
            group = layers.Lambda(lambda z: z[:, :, :, :, j * _d:j * _d + _d])(
                y)
            groups.append(
                layers.Conv3D(
                    _d,
                    kernel_size=(3, 3, 3),
                    strides=_strides,
                    padding='same',
                    kernel_regularizer=regularizers.l2(L2_regularizer))(group))

        # the grouped convolutional layer concatenates them as the outputs of the layer
        y = layers.concatenate(groups)

        return y

    def residual_block(y,
                       nb_channels_in,
                       nb_channels_out,
                       _strides=(1, 1, 1),
                       _project_shortcut=False):
        """
        Our network consists of a stack of residual blocks. These blocks have the same topology,
        and are subject to two simple rules:

        - If producing spatial maps of the same size, the blocks share the same hyper-parameters (width and filter sizes).
        - Each time the spatial map is down-sampled by a factor of 2, the width of the blocks is multiplied by a factor of 2.
        """
        shortcut = y

        # we modify the residual building block as a bottleneck design to make the network more economical
        y = layers.Conv3D(
            nb_channels_in,
            kernel_size=(1, 1, 1),
            strides=(1, 1, 1),
            padding='same',
            kernel_regularizer=regularizers.l2(L2_regularizer))(y)
        y = add_common_layers(y)

        # ResNeXt (identical to ResNet when `cardinality` == 1)
        y = grouped_convolution(y, nb_channels_in, _strides=_strides)
        y = add_common_layers(y)

        y = layers.Conv3D(
            nb_channels_out,
            kernel_size=(1, 1, 1),
            strides=(1, 1, 1),
            padding='same',
            kernel_regularizer=regularizers.l2(L2_regularizer))(y)
        # batch normalization is employed after aggregating the transformations and before adding to the shortcut
        y = layers.BatchNormalization()(y)

        # identity shortcuts used directly when the input and output are of the same dimensions
        if _project_shortcut or _strides != (1, 1, 1):
            # when the dimensions increase projection shortcut is used to match dimensions (done by 1×1 convolutions)
            # when the shortcuts go across feature maps of two sizes, they are performed with a stride of 2
            shortcut = layers.Conv3D(
                nb_channels_out,
                kernel_size=(1, 1, 1),
                strides=_strides,
                padding='same',
                kernel_regularizer=regularizers.l2(L2_regularizer))(shortcut)
            shortcut = layers.BatchNormalization()(shortcut)

        y = layers.Add()([shortcut, y])

        # relu is performed right after each batch normalization,
        # except for the output of the block, where relu is performed after adding to the shortcut
        y = layers.Activation('relu')(y)

        return y

    # conv1
    x = layers.Conv3D(64,
                      kernel_size=(7, 7, 7),
                      strides=(2, 2, 2),
                      padding='same',
                      kernel_regularizer=regularizers.l2(L2_regularizer))(x)
    x = add_common_layers(x)

    # conv2
    x = layers.MaxPooling3D(pool_size=(3, 3, 3),
                            strides=(2, 2, 2),
                            padding='same')(x)
    for i in range(2):
        project_shortcut = True if i == 0 else False
        x = residual_block(x, 64, 64, _project_shortcut=project_shortcut)

    # conv3
    for i in range(2):
        # down-sampling is performed by conv3_1, conv4_1, and conv5_1 with a stride of 2
        strides = (2, 2, 2) if i == 0 else (1, 1, 1)
        x = residual_block(x, 128, 128, _strides=strides)

    # conv4
    for i in range(3):
        strides = (2, 2, 2) if i == 0 else (1, 1, 1)
        x = residual_block(x, 256, 256, _strides=strides)

    # conv5
    #for i in range(2):
    #    strides = (2,2,2) if i == 0 else (1,1,1)
    #    x = residual_block(x, 512, 512, _strides=strides)

    x = layers.GlobalAveragePooling3D()(x)
    return x
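# --- usage sketch (not part of the original example) ------------------------
# `residual_network` returns globally pooled features only, so a classifier
# head still has to be attached.  A minimal sketch, assuming the module-level
# hyper-parameters it references (`cardinality`, `dropout_level`,
# `L2_regularizer`) and the usual `layers` / `models` imports are defined
# elsewhere in this file; the input shape and class count are illustrative.
def build_resnext3d_classifier(input_shape=(16, 112, 112, 3), num_classes=4):
    inputs = layers.Input(shape=input_shape)
    features = residual_network(inputs)
    outputs = layers.Dense(num_classes, activation='softmax')(features)
    return models.Model(inputs=inputs, outputs=outputs)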
def MobileNetV2(input_shape=None,
                alpha=1.0,
                include_top=True,
                weights='imagenet',
                input_tensor=None,
                pooling=None,
                classes=1000,
                **kwargs):
    """Instantiates the MobileNetV2 architecture.

    # Arguments
        input_shape: optional shape tuple, to be specified if you would
            like to use a model with an input image resolution that is not
            (224, 224, 3).
            It should have exactly 3 input channels.
            You can also omit this option if you would like
            to infer input_shape from an input_tensor.
            If you include both input_tensor and input_shape, then
            input_shape will be used if they match; if the shapes
            do not match, an error is raised.
            E.g. `(160, 160, 3)` would be one valid value.
        alpha: controls the width of the network. This is known as the
            width multiplier in the MobileNetV2 paper, but the name is kept
            for consistency with MobileNetV1 in Keras.
            - If `alpha` < 1.0, proportionally decreases the number
                of filters in each layer.
            - If `alpha` > 1.0, proportionally increases the number
                of filters in each layer.
            - If `alpha` = 1, the default number of filters from the paper
                is used at each layer.
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization),
              'imagenet' (pre-training on ImageNet),
              or the path to the weights file to be loaded.
        input_tensor: optional Keras tensor (i.e. output of
            `layers.Input()`)
            to use as image input for the model.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model
                will be the 5D tensor output of the
                last 3D convolutional block.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional block, and thus
                the output of the model will be a
                2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.

    # Returns
        A Keras model instance.

    # Raises
        ValueError: in case of an invalid argument for `weights`,
            or an invalid input shape, alpha, or rows when
            weights='imagenet'.
    """
    global backend, layers, models, keras_utils
    backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)

    if not (weights in {'imagenet', None} or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `imagenet` '
                         '(pre-training on ImageNet), '
                         'or the path to the weights file to be loaded.')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError(
            'If using `weights` as `"imagenet"` with `include_top` '
            'as true, `classes` should be 1000')

    # Determine proper input shape and default size.
    # If both input_shape and input_tensor are used, they should match
    if input_shape is not None and input_tensor is not None:
        try:
            is_input_t_tensor = backend.is_keras_tensor(input_tensor)
        except ValueError:
            try:
                is_input_t_tensor = backend.is_keras_tensor(
                    keras_utils.get_source_inputs(input_tensor))
            except ValueError:
                raise ValueError('input_tensor: ', input_tensor,
                                 'is not type input_tensor')
        if is_input_t_tensor:
            if backend.image_data_format() == 'channels_first':
                if backend.int_shape(input_tensor)[1] != input_shape[1]:
                    raise ValueError(
                        'input_shape: ', input_shape, 'and input_tensor: ',
                        input_tensor,
                        'do not meet the same shape requirements')
            else:
                if backend.int_shape(input_tensor)[2] != input_shape[1]:
                    raise ValueError(
                        'input_shape: ', input_shape, 'and input_tensor: ',
                        input_tensor,
                        'do not meet the same shape requirements')
        else:
            raise ValueError('input_tensor specified: ', input_tensor,
                             'is not a keras tensor')

    # If input_shape is None, infer shape from input_tensor
    if input_shape is None and input_tensor is not None:

        try:
            backend.is_keras_tensor(input_tensor)
        except ValueError:
            raise ValueError('input_tensor: ', input_tensor, 'is type: ',
                             type(input_tensor), 'which is not a valid type')

        if input_shape is None and not backend.is_keras_tensor(input_tensor):
            default_size = 224
        elif input_shape is None and backend.is_keras_tensor(input_tensor):
            if backend.image_data_format() == 'channels_first':
                rows = backend.int_shape(input_tensor)[2]
                cols = backend.int_shape(input_tensor)[3]
            else:
                rows = backend.int_shape(input_tensor)[1]
                cols = backend.int_shape(input_tensor)[2]

            if rows == cols and rows in [96, 128, 160, 192, 224]:
                default_size = rows
            else:
                default_size = 224

    # If input_shape is None and no input_tensor
    elif input_shape is None:
        default_size = 224

    # If input_shape is not None, infer default_size from it
    else:
        if backend.image_data_format() == 'channels_first':
            rows = input_shape[1]
            cols = input_shape[2]
        else:
            rows = input_shape[0]
            cols = input_shape[1]

        if rows == cols and rows in [96, 128, 160, 192, 224]:
            default_size = rows
        else:
            default_size = 224

    if backend.image_data_format() == 'channels_last':
        row_axis, col_axis = (0, 1)
    else:
        row_axis, col_axis = (1, 2)
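    # Note: unlike the upstream keras-applications version, this adaptation
    # never falls back to `_obtain_input_shape`, so `input_shape` is assumed
    # to be fully specified before the rows/cols lookup below.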
    rows = input_shape[row_axis]
    cols = input_shape[col_axis]

    if weights == 'imagenet':
        if alpha not in [0.35, 0.50, 0.75, 1.0, 1.3, 1.4]:
            raise ValueError('If imagenet weights are being loaded, '
                             'alpha can be one of `0.35`, `0.50`, `0.75`, '
                             '`1.0`, `1.3` or `1.4` only.')

        if rows != cols or rows not in [96, 128, 160, 192, 224]:
            rows = 224
            warnings.warn('`input_shape` is undefined or non-square, '
                          'or `rows` is not in [96, 128, 160, 192, 224].'
                          ' Weights for input shape (224, 224) will be'
                          ' loaded as the default.')

    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1

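    # `_make_divisible` (defined elsewhere in this file, as in the MobileNetV2
    # reference code) rounds the scaled channel count to a multiple of 8, so
    # widths stay hardware-friendly after applying `alpha`.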
    first_block_filters = _make_divisible(32 * alpha, 8)
    x = layers.ZeroPadding3D(padding=correct_pad(backend, img_input, 3),
                             name='Conv1_pad')(img_input)
    x = layers.Conv3D(first_block_filters,
                      kernel_size=3,
                      strides=(2, 2, 2),
                      padding='valid',
                      use_bias=False,
                      name='Conv1')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  epsilon=1e-3,
                                  momentum=0.999,
                                  name='bn_Conv1')(x)
    x = layers.ReLU(6., name='Conv1_relu')(x)

    x = _inverted_res_block(x,
                            filters=16,
                            alpha=alpha,
                            stride=1,
                            expansion=1,
                            block_id=0)

    x = _inverted_res_block(x,
                            filters=24,
                            alpha=alpha,
                            stride=2,
                            expansion=6,
                            block_id=1)
    x = _inverted_res_block(x,
                            filters=24,
                            alpha=alpha,
                            stride=1,
                            expansion=6,
                            block_id=2)

    x = _inverted_res_block(x,
                            filters=32,
                            alpha=alpha,
                            stride=2,
                            expansion=6,
                            block_id=3)
    x = _inverted_res_block(x,
                            filters=32,
                            alpha=alpha,
                            stride=1,
                            expansion=6,
                            block_id=4)
    x = _inverted_res_block(x,
                            filters=32,
                            alpha=alpha,
                            stride=1,
                            expansion=6,
                            block_id=5)

    x = _inverted_res_block(x,
                            filters=64,
                            alpha=alpha,
                            stride=2,
                            expansion=6,
                            block_id=6)
    x = _inverted_res_block(x,
                            filters=64,
                            alpha=alpha,
                            stride=1,
                            expansion=6,
                            block_id=7)
    x = _inverted_res_block(x,
                            filters=64,
                            alpha=alpha,
                            stride=1,
                            expansion=6,
                            block_id=8)
    x = _inverted_res_block(x,
                            filters=64,
                            alpha=alpha,
                            stride=1,
                            expansion=6,
                            block_id=9)

    x = _inverted_res_block(x,
                            filters=96,
                            alpha=alpha,
                            stride=1,
                            expansion=6,
                            block_id=10)
    x = _inverted_res_block(x,
                            filters=96,
                            alpha=alpha,
                            stride=1,
                            expansion=6,
                            block_id=11)
    x = _inverted_res_block(x,
                            filters=96,
                            alpha=alpha,
                            stride=1,
                            expansion=6,
                            block_id=12)

    x = _inverted_res_block(x,
                            filters=160,
                            alpha=alpha,
                            stride=2,
                            expansion=6,
                            block_id=13)
    x = _inverted_res_block(x,
                            filters=160,
                            alpha=alpha,
                            stride=1,
                            expansion=6,
                            block_id=14)
    x = _inverted_res_block(x,
                            filters=160,
                            alpha=alpha,
                            stride=1,
                            expansion=6,
                            block_id=15)

    x = _inverted_res_block(x,
                            filters=320,
                            alpha=alpha,
                            stride=1,
                            expansion=6,
                            block_id=16)

    # no alpha applied to last conv as stated in the paper:
    # if the width multiplier is greater than 1 we
    # increase the number of output channels
    if alpha > 1.0:
        last_block_filters = _make_divisible(1280 * alpha, 8)
    else:
        last_block_filters = 1280

    x = layers.Conv3D(last_block_filters,
                      kernel_size=1,
                      use_bias=False,
                      name='Conv_1')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  epsilon=1e-3,
                                  momentum=0.999,
                                  name='Conv_1_bn')(x)
    x = layers.ReLU(6., name='out_relu')(x)

    if include_top:
        x = layers.GlobalAveragePooling3D()(x)
        x = layers.Dense(classes,
                         activation='softmax',
                         use_bias=True,
                         name='Logits')(x)
    else:
        if pooling == 'avg':
            x = layers.GlobalAveragePooling3D()(x)
        elif pooling == 'max':
            x = layers.GlobalMaxPooling3D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = keras_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    model = models.Model(inputs,
                         x,
                         name='mobilenetv2_%0.2f_%s' % (alpha, rows))

    # Load weights.
    if weights == 'imagenet':
        if include_top:
            model_name = ('mobilenet_v2_weights_tf_dim_ordering_tf_kernels_' +
                          str(alpha) + '_' + str(rows) + '.h5')
            weight_path = BASE_WEIGHT_PATH + model_name
            weights_path = keras_utils.get_file(model_name,
                                                weight_path,
                                                cache_subdir='models')
        else:
            model_name = ('mobilenet_v2_weights_tf_dim_ordering_tf_kernels_' +
                          str(alpha) + '_' + str(rows) + '_no_top' + '.h5')
            weight_path = BASE_WEIGHT_PATH + model_name
            weights_path = keras_utils.get_file(model_name,
                                                weight_path,
                                                cache_subdir='models')
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)

    return model
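# --- usage sketch (not part of the original example) ------------------------
# Minimal sketch: this variant is a 3D adaptation (Conv3D, ZeroPadding3D,
# GlobalAveragePooling3D), while BASE_WEIGHT_PATH points at the stock 2D
# MobileNetV2 weight files, so `weights='imagenet'` is unlikely to load into
# the 3D layers; `weights=None` is the safe choice.  The input shape and class
# count are illustrative, and the module-level helpers (`correct_pad`,
# `_make_divisible`, `_inverted_res_block`, `get_submodules_from_kwargs`) are
# assumed to be the 3D-adapted versions defined elsewhere in this file.
if __name__ == '__main__':
    model_3d = MobileNetV2(input_shape=(16, 224, 224, 3),
                           weights=None,
                           classes=27)
    model_3d.summary()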
Ejemplo n.º 20
0
    def __init__(self, shape):
        self.re_rate = 0.9

        self.input = layers.Input(shape=shape)

        self.b_block0 = layers.Conv3D(8, (3, 3, 3), activation='relu',
                                      kernel_regularizer=regularizers.l2(self.re_rate),
                                      padding='same')(self.input)
        self.b_block0 = layers.BatchNormalization()(self.b_block0)
        self.b_block0 = layers.MaxPooling3D((2, 2, 2))(self.b_block0)

        self.b_block1 = layers.Conv3D(8, (3, 3, 3), activation='relu',
                                      kernel_regularizer=regularizers.l2(self.re_rate),
                                      padding='same')(self.b_block0)
        self.b_block1 = layers.BatchNormalization()(self.b_block1)

        self.b_block2 = layers.Conv3D(8, (3, 3, 3), activation='relu',
                                      kernel_regularizer=regularizers.l2(self.re_rate),
                                      padding='same')(self.b_block1)
        self.b_block2 = layers.BatchNormalization()(self.b_block2)

        # self.c_block1 = layers.concatenate([self.b_block0, self.b_block2])
        self.add1 = layers.add([self.b_block0, self.b_block2])
        self.add1 = layers.BatchNormalization()(self.add1)

        self.b_block2 = layers.Conv3D(16, (2, 2, 2), activation='relu',
                                      kernel_regularizer=regularizers.l2(self.re_rate),
                                      padding='same')(self.add1)

        self.b_block3 = layers.MaxPooling3D((2, 2, 2))(self.b_block2)

        self.b_block3 = layers.BatchNormalization()(self.b_block3)

        self.b_block4 = layers.Conv3D(16, (2, 2, 2), activation='relu',
                                      kernel_regularizer=regularizers.l2(self.re_rate),
                                      padding='same')(self.b_block3)
        self.b_block4 = layers.BatchNormalization()(self.b_block4)

        self.b_block5 = layers.Conv3D(16, (2, 2, 2), activation='relu',
                                      kernel_regularizer=regularizers.l2(self.re_rate),
                                      padding='same')(self.b_block4)
        self.b_block5 = layers.BatchNormalization()(self.b_block5)

        self.add2 = layers.add([self.b_block5, self.b_block3])
        self.add2 = layers.BatchNormalization()(self.add2)

        self.b_block6 = layers.Conv3D(32, (2, 2, 2), activation='relu',
                                      kernel_regularizer=regularizers.l2(self.re_rate),
                                      padding='same')(self.add2)
        self.b_block6 = layers.MaxPooling3D((2, 2, 2))(self.b_block6)
        self.b_block6 = layers.BatchNormalization()(self.b_block6)

        self.b_block7 = layers.Conv3D(32, (2, 2, 2), activation='relu',
                                      kernel_regularizer=regularizers.l2(self.re_rate),
                                      padding='same')(self.b_block6)
        self.b_block7 = layers.BatchNormalization()(self.b_block7)

        self.b_block8 = layers.Conv3D(32, (2, 2, 2), activation='relu',
                                      kernel_regularizer=regularizers.l2(self.re_rate),
                                      padding='same')(self.b_block7)
        self.b_block8 = layers.BatchNormalization()(self.b_block8)

        self.add3 = layers.add([self.b_block8, self.b_block6])
        self.add3 = layers.BatchNormalization()(self.add3)

        self.b_block9 = layers.Conv3D(64, (2, 2, 2), activation='relu',
                                      kernel_regularizer=regularizers.l2(self.re_rate),
                                      padding='same')(self.add3)
        self.b_block9 = layers.MaxPooling3D((2, 2, 2))(self.b_block9)
        self.b_block9 = layers.BatchNormalization()(self.b_block9)

        self.b_block10 = layers.Conv3D(64, (2, 2, 2), activation='relu',
                                      kernel_regularizer=regularizers.l2(self.re_rate),
                                      padding='same')(self.b_block9)
        self.b_block10 = layers.BatchNormalization()(self.b_block10)

        self.b_block10 = layers.Conv3D(64, (2, 2, 2), activation='relu',
                                      kernel_regularizer=regularizers.l2(self.re_rate),
                                      padding='same')(self.b_block10)
        self.b_block10 = layers.BatchNormalization()(self.b_block10)
        self.add4 = layers.add([self.b_block10, self.b_block9])
        self.globpooling = layers.GlobalAveragePooling3D()(self.add4)
        # multi clasify begin
        self.dense3 = layers.Dense(4, activation='softmax')(self.globpooling)
        # end
        # two classify begin
        # self.dense3 = layers.Dense(1, activation='sigmoid')(self.globpooling)
        # two classify end
        self.model = keras.Model(inputs=self.input, outputs=self.dense3)
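# --- usage sketch (not part of the original example) ------------------------
# The constructor above only builds the graph; compiling and training happen
# on `self.model`.  `VoxResNet` is an illustrative name for the enclosing
# class, and the input shape, optimizer settings and data variables below are
# assumptions.
#
# net = VoxResNet(shape=(32, 64, 64, 1))
# net.model.compile(optimizer=keras.optimizers.Adam(1e-4),
#                   loss='categorical_crossentropy',   # matches the Dense(4, softmax) head
#                   metrics=['accuracy'])
# net.model.fit(x_train, y_train, batch_size=4, epochs=20,
#               validation_data=(x_val, y_val))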