Пример #1
0
def get_3d_model2(input_shape=(128, 128, 128, 1), NUM_CLASSES=2):
    """VGG-style 3-D CNN: four conv stages (16-32-64-128 filters), each
    ending in 2x2x2 max pooling, then a 256-256 dense head with dropout
    and a NUM_CLASSES-way softmax output."""
    inputs = Input(input_shape)

    x = inputs
    # (filters, conv-layer count) per stage; every conv is 3x3x3 'same' ReLU.
    for filters, depth in ((16, 2), (32, 2), (64, 3), (128, 3)):
        for _ in range(depth):
            x = Conv3D(filters=filters, kernel_size=(3, 3, 3), padding='same',
                       activation='relu')(x)
        x = MaxPool3D(pool_size=(2, 2, 2))(x)

    x = Flatten()(x)
    x = Dense(units=256, activation='relu')(x)
    x = Dense(units=256, activation='relu')(x)
    x = Dropout(0.4)(x)
    outputs = Dense(units=NUM_CLASSES, activation='softmax')(x)

    return Model(inputs=inputs, outputs=outputs)
Пример #2
0
def con_net():
    """Build and compile a 3-D CNN regressor over 128^3 single-channel
    volumes: four conv blocks with spatial-only (1,2,2) pooling, a
    Dense(19) bottleneck, and a single sigmoid output trained with
    binary cross-entropy (Adam, lr=1e-5).

    NOTE(review): the original comment said "20 features" but the
    bottleneck is Dense(19) — confirm which is intended.
    """
    # Reset the global Keras state so repeated builds get fresh layer names.
    keras.backend.clear_session()
    input_layer = Input((128,128,128,1))
    ## conv block 1: pooling keeps depth, halves height/width
    conv1 = Conv3D(filters=8, kernel_size=(3, 3, 3), activation='relu', data_format='channels_last')(input_layer)
    pool1 = MaxPool3D(pool_size=(1,2,2), strides=(1,2,2))(conv1)
    batch1 = BatchNormalization()(pool1)
    ## conv block 2
    conv2 = Conv3D(filters=16, kernel_size=(3, 3, 3), activation='relu')(batch1)
    pool2 = MaxPool3D(pool_size=(1,2,2), strides=(1,2,2))(conv2)
    batch2 = BatchNormalization()(pool2)
    ## conv block 3: two convs before pooling
    conv3 = Conv3D(filters=32, kernel_size=(3, 3, 3), activation='relu')(batch2)
    conv4 = Conv3D(filters=64, kernel_size=(3, 3, 3), activation='relu')(conv3)
    pool3 = MaxPool3D(pool_size=(1,2,2), strides=(1,2,2))(conv4)
    batch3 = BatchNormalization()(pool3)
    ## conv block 4: 2x2x2 kernels at the deepest stage
    conv5 = Conv3D(filters=128, kernel_size=(2, 2, 2), activation='relu')(batch3)
    conv6 = Conv3D(filters=256, kernel_size=(2, 2, 2), activation='relu')(conv5)
    pool4 = MaxPool3D(pool_size=(1,2,2), strides=(1,2,2))(conv6)
    flatten1 = Flatten()(pool4)
    ## linear bottleneck, then scalar sigmoid output
    dense1 = Dense(19, activation='linear')(flatten1)
    output_layer = Dense(1, activation='sigmoid')(dense1)
    ## compile with a small learning rate (lr= is the legacy Keras kwarg)
    adam = keras.optimizers.Adam(lr=1e-5)
    model = Model(inputs=input_layer, outputs=output_layer) 
    model.compile(loss='binary_crossentropy', metrics=['binary_crossentropy','accuracy'], optimizer=adam)     
    return model
Пример #3
0
def cnn_3d(num_classes):
    """3-D CNN for 50x50x50x3 volumes: two conv/pool stages, batch norm,
    then a 2048-512 dense head with dropout and a softmax output."""
    inputs = Input((50, 50, 50, 3))

    # Stage 1: two valid-padding convs, then 2x2x2 pooling.
    x = Conv3D(filters=8, kernel_size=(3, 3, 3), activation='relu')(inputs)
    x = Conv3D(filters=16, kernel_size=(3, 3, 3), activation='relu')(x)
    x = MaxPool3D(pool_size=(2, 2, 2))(x)

    # Stage 2.
    x = Conv3D(filters=32, kernel_size=(3, 3, 3), activation='relu')(x)
    x = Conv3D(filters=64, kernel_size=(3, 3, 3), activation='relu')(x)
    x = MaxPool3D(pool_size=(2, 2, 2))(x)

    # Normalize conv features before the fully connected head.
    x = BatchNormalization()(x)
    x = Flatten()(x)

    # Dense head with dropout regularization.
    for width in (2048, 512):
        x = Dense(units=width, activation='relu')(x)
        x = Dropout(0.4)(x)
    outputs = Dense(units=num_classes, activation='softmax')(x)

    return Model(inputs=inputs, outputs=outputs)
Пример #4
0
def model(input_shape=(80, 80, 80, 3), dropout_prob=0.3, classes=4):
    """3-D CNN classifier: two conv/pool stages, batch norm, and a
    1024-512 dense head with configurable dropout before a softmax
    output over `classes` categories."""
    inputs = Input(input_shape)

    # Stage 1: two valid-padding convs followed by 2x2x2 pooling.
    x = Conv3D(filters=8, kernel_size=(3, 3, 3), activation='relu')(inputs)
    x = Conv3D(filters=16, kernel_size=(3, 3, 3), activation='relu')(x)
    x = MaxPool3D(pool_size=(2, 2, 2))(x)

    # Stage 2.
    x = Conv3D(filters=32, kernel_size=(3, 3, 3), activation='relu')(x)
    x = Conv3D(filters=64, kernel_size=(3, 3, 3), activation='relu')(x)
    x = MaxPool3D(pool_size=(2, 2, 2))(x)

    # Normalize conv features before the MLP head.
    x = BatchNormalization()(x)
    x = Flatten()(x)

    # Dense head; dropout guards against overfitting.
    for width in (1024, 512):
        x = Dense(units=width, activation='relu')(x)
        x = Dropout(dropout_prob)(x)
    outputs = Dense(units=classes, activation='softmax')(x)

    return Model(inputs=inputs, outputs=outputs)
def build_model(categories):
    """Sequential 3-D CNN for 5-frame 112x112 grayscale clips: four
    conv/pool stages (16-32-64-128 filters), a 512-512 dense head with
    heavy dropout, compiled with clipped SGD + categorical cross-entropy."""
    net = Sequential()
    for i, filters in enumerate((16, 32, 64, 128)):
        conv_kwargs = dict(filters=filters, kernel_size=3, strides=1,
                           padding='same', activation='relu')
        if i == 0:
            # Only the first layer declares the clip shape.
            conv_kwargs['input_shape'] = (5, 112, 112, 1)
        net.add(Conv3D(**conv_kwargs))
        # Pool halves height/width; the temporal axis shrinks via pool_size.
        net.add(MaxPool3D(pool_size=(2,2,2), strides=(1,2,2)))

    net.add(Flatten())
    for _ in range(2):
        net.add(Dense(512, activation='relu'))
        net.add(Dropout(0.5))
    net.add(Dense(categories, activation='softmax'))

    # Gradient clipping (norm and value) stabilizes SGD training.
    net.compile(loss='categorical_crossentropy', metrics=['accuracy'],
                optimizer=SGD(lr=0.01, momentum=0.9, clipnorm=1., clipvalue=0.5),
                )
    return net
Пример #6
0
def create_m3():
    """ConvLSTM2D classifier over clips of 4 frames of 100x128 RGB.

    Four ConvLSTM2D stages (the first with a max-norm kernel constraint),
    each followed by batch normalization; 3-D max pooling after the first
    two stages; then dropout and an L2-regularized 2-way softmax head.

    Fixes: removed a dead ``model = Sequential()`` assignment (the name
    was unconditionally rebound to the functional-API Model at the end),
    and factored the four byte-identical BatchNormalization configurations
    into one local helper.

    Returns:
        keras.models.Model mapping (4, 100, 128, 3) inputs to 2-way softmax.
    """
    lstmnum = 4  # frames per clip

    def _bn(tensor):
        # Shared BN configuration used after every ConvLSTM2D stage.
        return BatchNormalization(axis=-1, momentum=0.9, epsilon=0.001,
                                  center=True, scale=True,
                                  beta_initializer='zeros',
                                  gamma_initializer='ones',
                                  moving_mean_initializer='zeros',
                                  moving_variance_initializer='ones')(tensor)

    input1 = keras.layers.Input(shape=(lstmnum, 100, 128, 3))
    lstm_out1 = ConvLSTM2D(filters=1, kernel_size=[3, 3], strides=(1, 1),
                           padding='valid', kernel_constraint=max_norm(2.),
                           activation='relu',
                           input_shape=(lstmnum, 100, 128, 3),
                           return_sequences=True)(input1)
    x = _bn(lstm_out1)
    x = MaxPool3D(pool_size=(2, 2, 2))(x)
    lstm_out2 = ConvLSTM2D(filters=2, kernel_size=[3, 3], strides=(1, 1),
                           padding='valid', activation='relu',
                           return_sequences=True)(x)
    x = _bn(lstm_out2)
    x = MaxPool3D(pool_size=(2, 2, 2))(x)
    x = ConvLSTM2D(filters=3, kernel_size=[3, 3], strides=(1, 1),
                   padding='valid', activation='relu',
                   return_sequences=True)(x)
    x = _bn(x)
    # No pooling after stage 3 (kept disabled as in the original design).
    x = ConvLSTM2D(filters=3, kernel_size=[3, 3], strides=(1, 1),
                   padding='valid', activation='relu',
                   return_sequences=True)(x)
    x = _bn(x)
    flat = Flatten()(x)
    out = Dropout(0.3)(flat)
    out = Dense(2, kernel_regularizer=regularizers.l2(0.01), activation='softmax')(out)
    model = keras.models.Model(inputs=input1, outputs=out)
    return model
Пример #7
0
    def CNN(self, input_dim, num_classes):
        """Assemble a Sequential 3-D CNN: two conv stages built via the
        class's ``self.Conv`` factory, then a 4096-1024 dense head with
        dropout and a ``num_classes``-way softmax output."""
        net = Sequential()

        # Stage 1: 8 -> 16 filters, then pooling.
        net.add(self.Conv(8, (3, 3, 3), input_shape=input_dim))
        net.add(self.Conv(16, (3, 3, 3)))
        net.add(MaxPool3D())

        # Stage 2: 32 -> 64 filters, batch norm, pooling, light dropout.
        net.add(self.Conv(32, (3, 3, 3)))
        net.add(self.Conv(64, (3, 3, 3)))
        net.add(BatchNormalization())
        net.add(MaxPool3D())
        net.add(Dropout(0.25))

        net.add(Flatten())

        # Wide dense head with strong dropout regularization.
        for width in (4096, 1024):
            net.add(Dense(width, activation="relu"))
            net.add(Dropout(0.5))

        net.add(Dense(num_classes, activation="softmax"))

        return net
Пример #8
0
def creat_model(input_shape, class_num):
    """Sequential 3-D CNN: three conv/pool stages with shrinking kernels
    (8^3, 4^3, 2^3) and growing filters (64-128-256), then an 800-400-200
    dense head with dropout and a ``class_num``-way softmax output."""
    net = Sequential()
    net.add(Conv3D(64, kernel_size=(8, 8, 8), activation='relu',
                   input_shape=input_shape))
    net.add(MaxPool3D(pool_size=(2, 2, 2), strides=2, padding='valid'))

    net.add(Conv3D(128, (4, 4, 4), activation='relu'))
    net.add(MaxPool3D(pool_size=(2, 2, 2), strides=2, padding='valid'))

    net.add(Conv3D(256, (2, 2, 2), activation='relu'))
    net.add(MaxPool3D(pool_size=(2, 2, 2), strides=2, padding='valid'))

    net.add(Flatten())
    # Tapering dense head, dropout after every layer.
    for width in (800, 400, 200):
        net.add(Dense(width, activation='relu'))
        net.add(Dropout(0.5))
    net.add(Dense(class_num, activation='softmax'))
    return net
Пример #9
0
def c3d_model(input_shape, nb_classes, weight_decay=0.001):
    """C3D-style 3-D CNN: five L2-regularized conv/pool stages (the first
    pool preserves the temporal axis), a 2048-2048 dense head with dropout,
    and an ``nb_classes``-way softmax output."""
    def _conv(filters, tensor):
        # All convs share the 3x3x3 'same' ReLU + L2 configuration.
        return Conv3D(filters, (3, 3, 3), strides=(1, 1, 1), padding='same',
                      activation='relu',
                      kernel_regularizer=l2(weight_decay))(tensor)

    inputs = Input(input_shape)
    x = _conv(64, inputs)
    # First pool keeps the last (temporal) dimension intact.
    x = MaxPool3D((2, 2, 1), strides=(2, 2, 1), padding='same')(x)

    for filters in (128, 128, 256, 256):
        x = _conv(filters, x)
        x = MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)

    x = Flatten()(x)
    for _ in range(2):
        x = Dense(2048, activation='relu', kernel_regularizer=l2(weight_decay))(x)
        x = Dropout(0.5)(x)
    x = Dense(nb_classes, kernel_regularizer=l2(weight_decay))(x)
    x = Activation('softmax')(x)

    return Model(inputs, x)
Пример #10
0
def cnn():
    """3-D CNN for 20x20x20x3 voxel grids with a 2-way softmax output.

    Architecture: two conv/pool stages, batch normalization, then a
    4096-512 dense head with dropout.

    Fix: both Dropout layers used rate=1, which zeroes every activation
    during training and makes the dense head untrainable (modern Keras
    requires 0 <= rate < 1). Rate is now 0.4, matching the sibling
    classifiers in this file.

    Returns:
        Uncompiled Keras Model.
    """
    ## INPUT LAYER
    input_layer = Input((20, 20, 20, 3))

    ## CONVOLUTIONAL LAYERS
    conv_layer1 = Conv3D(filters=8, kernel_size=(3, 3, 3), activation='relu')(input_layer)
    conv_layer2 = Conv3D(filters=16, kernel_size=(3, 3, 3), activation='relu')(conv_layer1)

    ## MAXPOOLING LAYER
    pooling_layer1 = MaxPool3D(pool_size=(2, 2, 2))(conv_layer2)

    ## CONVOLUTIONAL LAYERS
    conv_layer3 = Conv3D(filters=32, kernel_size=(3, 3, 3), activation='relu')(pooling_layer1)
    conv_layer4 = Conv3D(filters=64, kernel_size=(3, 3, 3), activation='relu')(conv_layer3)

    ## MAXPOOLING LAYER
    pooling_layer2 = MaxPool3D(pool_size=(2, 2, 2))(conv_layer4)

    ## BATCH NORMALIZATION BEFORE THE FULLY CONNECTED HEAD
    pooling_layer2 = BatchNormalization()(pooling_layer2)
    flatten_layer = Flatten()(pooling_layer2)

    ## FULLY CONNECTED LAYERS WITH DROPOUT TO PREVENT OVERFITTING
    dense_layer1 = Dense(units=4096, activation='relu')(flatten_layer)
    dense_layer1 = Dropout(0.4)(dense_layer1)  # was Dropout(1): drops 100% of units
    dense_layer2 = Dense(units=512, activation='relu')(dense_layer1)
    dense_layer2 = Dropout(0.4)(dense_layer2)  # was Dropout(1)
    output_layer = Dense(units=2, activation='softmax')(dense_layer2)

    ## DEFINE MODEL WITH INPUT AND OUTPUT LAYERS
    model = Model(inputs=input_layer, outputs=output_layer)
    return model
Пример #11
0
def get_model(weights_path=None):
    """Compiled 3-D CNN classifier for 16x16x16x3 inputs (10 classes),
    optionally loading weights from ``weights_path``.

    Fix: the second pooling layer previously consumed ``con2`` instead of
    ``con5``, which silently dropped con4/con5 from the computation graph
    (their parameters were created but never used). It now pools the
    deepest conv output.

    Args:
        weights_path: optional path to a saved weights file to load.

    Returns:
        Compiled Keras Model (categorical cross-entropy, Adadelta).
    """
    ins = Input((16, 16, 16, 3))
    con1 = Conv3D(filters=8, kernel_size=(3, 3, 3), activation='relu')(ins)
    con2 = Conv3D(filters=16, kernel_size=(3, 3, 3), activation='relu')(con1)
    maxp3 = MaxPool3D(pool_size=(2, 2, 2))(con2)
    con4 = Conv3D(filters=32, kernel_size=(3, 3, 3), activation='relu')(maxp3)
    # NOTE(review): sigmoid on a hidden conv layer is unusual — confirm intent.
    con5 = Conv3D(filters=64, kernel_size=(3, 3, 3),
                  activation='sigmoid')(con4)
    maxp6 = MaxPool3D(pool_size=(2, 2, 2))(con5)  # was (con2): dead branch bug
    batch = BatchNormalization()(maxp6)
    flat = Flatten()(batch)
    dens1 = Dense(units=4096, activation='relu')(flat)
    drop1 = Dropout(0.7)(dens1)
    dens2 = Dense(units=1024, activation='relu')(drop1)
    drop2 = Dropout(0.7)(dens2)
    outs = Dense(units=10, activation='softmax')(drop2)
    model = Model(inputs=ins, outputs=outs)
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adadelta(lr=0.01),
                  metrics=['accuracy'])

    if weights_path:
        model.load_weights(weights_path)

    return model
Пример #12
0
def c3d_model(resolution = (112,112), n_frames = 16, channels = 3, nb_classes = 3):
    """C3D classifier over (H, W, frames, channels) volumes: five
    L2-regularized conv/pool stages, a 2048-2048 dense head with dropout,
    and an ``nb_classes``-way softmax output."""
    input_shape = tuple(list(resolution) + [n_frames] + [channels])
    weight_decay = 0.005

    def _conv(filters, tensor):
        # Shared 3x3x3 'same' ReLU conv with L2 weight decay.
        return Conv3D(filters, (3,3,3), strides=(1,1,1), padding='same',
                      activation='relu',
                      kernel_regularizer=l2(weight_decay))(tensor)

    inputs = Input(input_shape)
    x = _conv(64, inputs)
    # First pool preserves the temporal axis.
    x = MaxPool3D((2,2,1), strides=(2,2,1), padding='same')(x)

    for filters in (128, 128, 256, 256):
        x = _conv(filters, x)
        x = MaxPool3D((2,2,2), strides=(2,2,2), padding='same')(x)

    x = Flatten()(x)
    for _ in range(2):
        x = Dense(2048, activation='relu', kernel_regularizer=l2(weight_decay))(x)
        x = Dropout(0.5)(x)
    x = Dense(nb_classes, kernel_regularizer=l2(weight_decay))(x)
    x = Activation('softmax')(x)

    return Model(inputs, x)
Пример #13
0
    def encoder_dm(self, dm_box):
        """Encode the dark-matter box through four downsampling stages.

        Returns the per-stage feature maps (dm0..dm3 pre-pool, dm4 already
        pooled) so the caller can use them as skip connections.
        NOTE(review): self.inter_up0* / self.encode_dm* are defined
        elsewhere in the class — assumed to be conv-style blocks; confirm.
        """

        # Bring the dm field to the same size as the tau field
        # (upsampling layers kept disabled as in the original design).
        dm0 = dm_box  #UpSampling3D(name="dm_up1")(dm_box)
        dm0 = self.inter_up0(dm0)
        dm0 = self.inter_up0p(dm0)
        #dm0 = UpSampling3D(name="dm_up2")(dm0)

        # Stage one: two encoder blocks, then 2x downsample.
        dm1 = self.encode_dm1(dm0)
        dm1 = self.encode_dm1p(dm1)
        dm1_p = MaxPool3D()(dm1)

        # Stage two: single block.
        dm2 = self.encode_dm2(dm1_p)
        dm2_p = MaxPool3D()(dm2)

        # Stage three: two blocks.
        dm3 = self.encode_dm3(dm2_p)
        dm3 = self.encode_dm3p(dm3)

        dm3_p = MaxPool3D()(dm3)

        # Stage four: dm4 is returned after its own pooling (unlike dm1-dm3,
        # which are returned pre-pool).
        dm4 = self.encode_dm4(dm3_p)
        dm4 = self.encode_dm4p(dm4)

        dm4 = MaxPool3D()(dm4)
        return dm0, dm1, dm2, dm3, dm4
Пример #14
0
    def encoder_tau(self, x_in, dm0, dm1, dm2, dm3, dm4):
        """Encode the tau field, merging in dark-matter features at each
        resolution (channels-first concatenation on axis=1).

        NOTE(review): parameters dm0 and dm4 are accepted but never used
        in this body — confirm whether they are kept for interface
        symmetry with encoder_dm or should be wired in.
        """

        # Channels-first view of the flat input: (C=3, 64, 64, 64).
        tau_box = Reshape((3, 64, 64, 64))(x_in)

        # Stage 1: two encoder blocks, no pooling yet.
        tau1 = self.encode_tau1(tau_box)
        tau1 = self.encode_tau1P(tau1)

        # Merge dm stage-1 features + stage 2.
        tau1_dm1 = concatenate([dm1, tau1], axis=1)
        tau2 = self.encode_tau2(tau1_dm1)
        tau2 = self.encode_tau2P(tau2)
        tau2_p = MaxPool3D()(tau2)

        # Merge dm stage-2 features + stage 3 (with batch norm).

        tau2_dm2 = concatenate([dm2, tau2_p], axis=1)
        tau3 = self.encode_tau3(tau2_dm2)
        tau3 = self.encode_tau3P(tau3)
        tau3 = BatchNormalization()(tau3)
        tau3_p = MaxPool3D()(tau3)

        # Merge dm stage-3 features + stage 4.
        tau3_dm3 = concatenate([dm3, tau3_p], axis=1)
        tau4 = self.encode_tau4(tau3_dm3)
        tau4 = MaxPool3D()(tau4)  #maybe do something else here? more layers?
        # tau4 = self.encode_tau4p(tau4)
        # Second consecutive pooling — intentional per the original author's
        # comment above, though flagged there as a possible revision point.
        tau4 = MaxPool3D()(tau4)
        return tau4
Пример #15
0
def example_network(input_shape):
    """3-D CNN regressor: three conv stages with truncated-normal init,
    a tapering 1000-500-200 dense head with dropout, and a single sigmoid
    output.

    Fix: ``Model(input=..., output=...)`` used keyword arguments that were
    deprecated and then removed from Keras — the constructor takes
    ``inputs``/``outputs`` (as every other model in this file uses).

    Args:
        input_shape: shape tuple for the volumetric input (without batch dim).

    Returns:
        Uncompiled Keras Model.
    """
    im_input = Input(shape=input_shape)

    # Conv stage 1: large 11^3 receptive field.
    t = Conv3D(64, (11, 11, 11),
               padding='valid',
               kernel_initializer=initializers.truncated_normal(mean=0,
                                                                stddev=0.001),
               bias_initializer=initializers.constant(0.1))(im_input)
    t = Activation('relu')(t)
    t = MaxPool3D(pool_size=(2, 2, 2), padding='valid')(t)

    # Conv stage 2.
    t = Conv3D(128, (6, 6, 6),
               padding='valid',
               kernel_initializer=initializers.truncated_normal(mean=0,
                                                                stddev=0.001),
               bias_initializer=initializers.constant(0.1))(t)
    t = Activation('relu')(t)
    t = MaxPool3D(pool_size=(2, 2, 2), padding='valid')(t)

    # Conv stage 3 (no pooling afterwards).
    t = Conv3D(256, (3, 3, 3),
               padding="valid",
               kernel_initializer=initializers.truncated_normal(mean=0,
                                                                stddev=0.001),
               bias_initializer=initializers.constant(0.1))(t)
    t = Activation('relu')(t)

    t = Flatten()(t)

    # Dense head; stddev scaled as 1/sqrt(fan) per layer width.
    t = Dense(1000,
              kernel_initializer=initializers.truncated_normal(mean=0,
                                                               stddev=1 /
                                                               np.sqrt(1000)),
              bias_initializer=initializers.constant(1.0))(t)
    t = Activation('relu')(t)
    t = Dropout(0.5)(t)

    t = Dense(500,
              kernel_initializer=initializers.truncated_normal(mean=0,
                                                               stddev=1 /
                                                               np.sqrt(500)),
              bias_initializer=initializers.constant(1.0))(t)
    t = Activation('relu')(t)
    t = Dropout(0.5)(t)

    t = Dense(200,
              kernel_initializer=initializers.truncated_normal(mean=0,
                                                               stddev=1 /
                                                               np.sqrt(200)),
              bias_initializer=initializers.constant(1.0))(t)
    t = Activation('relu')(t)
    t = Dropout(0.5)(t)

    t = Dense(1)(t)
    output = Activation('sigmoid')(t)

    model = Model(inputs=im_input, outputs=output)  # was input=/output= (removed API)

    return model
Пример #16
0
def c3d_model():
    """C3D variant for (112, 112, 20, 3) clips: five L2-regularized
    conv/pool stages, then a 512-32 dense bottleneck and a 6-way softmax
    output with glorot-normal initialization."""
    input_shape = (112, 112, 20, 3)
    weight_decay = 0.005
    # NOTE(review): nb_classes is never used — the head below outputs 6 units.
    nb_classes = 101

    inputs = Input(input_shape)
    x = inputs
    # Five identical conv blocks, all pooled 2x2x2.
    for filters in (64, 128, 128, 256, 256):
        x = Conv3D(filters, (3, 3, 3),
                   strides=(1, 1, 1),
                   padding='same',
                   activation='relu',
                   kernel_regularizer=l2(weight_decay))(x)
        x = MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)

    x = Flatten()(x)
    # input_dim is a legacy kwarg (ignored in the functional API) — kept as-is.
    x = Dense(512,
              input_dim=4096,
              kernel_initializer='glorot_normal',
              kernel_regularizer=l2(0.001),
              activation='relu')(x)
    x = Dropout(0.6)(x)
    x = Dense(32,
              kernel_initializer='glorot_normal',
              kernel_regularizer=l2(0.001))(x)
    x = Dropout(0.6)(x)
    x = Dense(6,
              kernel_initializer='glorot_normal',
              kernel_regularizer=l2(0.001),
              activation='softmax')(x)

    return Model(inputs, x)
Пример #17
0
def con_net():
    """Build and compile a two-head 3-D CNN for brain volumes (91x109x91x1):
    head1 regresses age (linear, bias initialized to 20); head2 is a
    domain-adaptation classifier fed through a gradient-reversal layer.

    NOTE(review): head2 uses a sigmoid activation but is trained with
    categorical_crossentropy over 93 units — softmax is the usual pairing;
    confirm this is intentional.
    """
    ## input layer
    # Reset global Keras state so repeated builds get fresh layer names.
    keras.backend.clear_session()
    input_layer = Input((91, 109, 91, 1))
    ## conv block 1: spatial-only pooling preserves the first axis
    conv1 = Conv3D(filters=8,
                   kernel_size=(3, 3, 3),
                   activation='relu',
                   data_format='channels_last')(input_layer)
    pool1 = MaxPool3D(pool_size=(1, 2, 2), strides=(1, 2, 2))(conv1)
    batch1 = BatchNormalization()(pool1)
    ## conv block 2
    conv2 = Conv3D(filters=16, kernel_size=(3, 3, 3),
                   activation='relu')(batch1)
    pool2 = MaxPool3D(pool_size=(1, 2, 2), strides=(1, 2, 2))(conv2)
    batch2 = BatchNormalization()(pool2)
    ## conv block 3: two convs before pooling
    conv3 = Conv3D(filters=32, kernel_size=(3, 3, 3),
                   activation='relu')(batch2)
    conv4 = Conv3D(filters=64, kernel_size=(3, 3, 3), activation='relu')(conv3)
    pool3 = MaxPool3D(pool_size=(1, 2, 2), strides=(1, 2, 2))(conv4)
    batch3 = BatchNormalization()(pool3)
    ## conv block 4: 2x2x2 kernels at the deepest stage
    conv5 = Conv3D(filters=128, kernel_size=(2, 2, 2),
                   activation='relu')(batch3)
    conv6 = Conv3D(filters=256, kernel_size=(2, 2, 2),
                   activation='relu')(conv5)
    pool4 = MaxPool3D(pool_size=(1, 2, 2), strides=(1, 2, 2))(conv6)
    flatten1 = Flatten()(pool4)
    ## head 1 - age prediction (bias starts at 20, presumably near mean age
    ## of the cohort — TODO confirm)
    head1 = Dense(1,
                  activation='linear',
                  bias_initializer=keras.initializers.Constant(value=20),
                  name='head1')(flatten1)
    ## head 2 - domain adaptation: gradient reversal makes the shared
    ## features domain-invariant while head2 learns to classify domain
    Flip = DA_functions.GradientReversal(1)
    head2_flip = Flip(flatten1)
    head2 = Dense(93,
                  activation='sigmoid',
                  bias_initializer=keras.initializers.Constant(value=0),
                  name='head2')(head2_flip)
    ## compile the two-output model (legacy lr= kwarg)
    adam = keras.optimizers.Adam(lr=1e-4)
    model = Model(inputs=input_layer, outputs=[head1, head2])
    loss_funcs = {
        "head1": "mean_squared_error",
        "head2": "categorical_crossentropy"
    }
    loss_weights = {"head1": 1.0, "head2": 1.0}
    metrics = {
        "head1": ["mean_squared_error", "mean_absolute_error"],
        "head2": ["categorical_crossentropy", "accuracy"]
    }
    model.compile(loss=loss_funcs,
                  loss_weights=loss_weights,
                  metrics=metrics,
                  optimizer=adam)
    return model
Пример #18
0
def c3d_model(nb_classes=101,
              img_w=112,
              img_h=112,
              num_channels=3,
              include_top=True):
    """C3D backbone over 16-frame clips. With ``include_top`` the model
    ends in a 2048-2048 dense head and an ``nb_classes``-way softmax;
    otherwise it outputs the flattened conv features."""
    input_shape = (img_w, img_h, 16, num_channels)
    weight_decay = 0.005

    inputs = Input(input_shape)
    x = inputs
    # First pool keeps the temporal axis; the remaining four halve it.
    pool_sizes = [(2, 2, 1)] + [(2, 2, 2)] * 4
    for filters, pool in zip((64, 128, 128, 256, 256), pool_sizes):
        x = Conv3D(filters, (3, 3, 3),
                   strides=(1, 1, 1),
                   padding='same',
                   activation='relu',
                   kernel_regularizer=l2(weight_decay))(x)
        x = MaxPool3D(pool, strides=pool, padding='same')(x)

    x = Flatten()(x)

    if include_top:
        for _ in range(2):
            x = Dense(2048, activation='relu',
                      kernel_regularizer=l2(weight_decay))(x)
            x = Dropout(0.5)(x)
        x = Dense(nb_classes, kernel_regularizer=l2(weight_decay))(x)
        x = Activation('softmax')(x)

    return Model(inputs, x)
Пример #19
0
def model_thresholding():
    """Small channels-first 3-D U-Net-style segmenter for (1, 240, 240, 48)
    volumes: three dilated-conv encoder stages with two max-pools, two
    transpose-conv decoder stages with skip concatenations, and a single
    sigmoid output channel (voxel-wise mask).
    """
    # All layers use channels-first layout; skip concats are on axis=1.
    IMAGE_ORDERING = "channels_first"
    img_input = Input(shape=(1, 240, 240, 48))
    # Encoder stage 1 (dilation widens the receptive field without pooling).
    conv_1 = Conv3D(filters=16,
                    kernel_size=(3, 3, 3),
                    padding='same',
                    activation='relu',
                    name="CONV3D_1",
                    dilation_rate=(2, 2, 2),
                    data_format=IMAGE_ORDERING)(img_input)
    maxpool_1 = MaxPool3D(name="MAXPOOL3D_1",
                          data_format=IMAGE_ORDERING)(conv_1)
    # Encoder stage 2.
    conv_2 = Conv3D(filters=32,
                    kernel_size=(3, 3, 3),
                    padding='same',
                    activation='relu',
                    name="CONV3D_2",
                    dilation_rate=(2, 2, 2),
                    data_format=IMAGE_ORDERING)(maxpool_1)
    maxpool_2 = MaxPool3D(name="MAXPOOL3D_2",
                          data_format=IMAGE_ORDERING)(conv_2)
    # Bottleneck.
    conv_3 = Conv3D(filters=32,
                    kernel_size=(3, 3, 3),
                    padding='same',
                    activation='relu',
                    name="CONV3D_3",
                    dilation_rate=(2, 2, 2),
                    data_format=IMAGE_ORDERING)(maxpool_2)

    # Decoder stage 1: 2x upsample, skip-connect with conv_2.
    convt_1 = Conv3DTranspose(16,
                              kernel_size=(2, 2, 2),
                              strides=(2, 2, 2),
                              name="CONV3DT_1",
                              activation='relu',
                              data_format=IMAGE_ORDERING)(conv_3)
    concat_1 = Concatenate(axis=1)([convt_1, conv_2])
    conv_4 = Conv3D(filters=16,
                    kernel_size=(3, 3, 3),
                    padding='same',
                    activation='relu',
                    name="CONV3D_4",
                    data_format=IMAGE_ORDERING)(concat_1)
    # Decoder stage 2: 2x upsample, skip-connect with conv_1.
    convt_2 = Conv3DTranspose(4,
                              kernel_size=(2, 2, 2),
                              strides=(2, 2, 2),
                              name="CONV3DT_2",
                              activation='relu',
                              data_format=IMAGE_ORDERING)(conv_4)
    concat_2 = Concatenate(axis=1)([convt_2, conv_1])
    # Single-channel sigmoid output: per-voxel probability map.
    conv_5 = Conv3D(filters=1,
                    kernel_size=(3, 3, 3),
                    padding='same',
                    activation='sigmoid',
                    name="CONV3D_5",
                    data_format=IMAGE_ORDERING)(concat_2)
    return Model(img_input, conv_5)
Пример #20
0
def cnn(input_data_shape, regress=True):
    '''
    Model for SolventNet.

    Fix: the three Dropout layers used rate=1, which zeroes every
    activation during training (and modern Keras rejects rates outside
    [0, 1)); they now use 0.4, consistent with the other models in this
    file.

    INPUTS:
        input_data_shape: [tuple]
            input data shape
        regress: [logical, default=True]
            True if you want your model to have a linear regression at the end
    OUTPUT:
        model: [obj]
            tensorflow model
    '''
    ## INPUT LAYER
    input_layer = Input(input_data_shape)

    ## CONVOLUTIONAL LAYERS
    conv_layer1 = Conv3D(filters=8, kernel_size=(3, 3, 3),
                         activation='relu')(input_layer)
    conv_layer2 = Conv3D(filters=16, kernel_size=(3, 3, 3),
                         activation='relu')(conv_layer1)

    ## MAXPOOLING LAYER
    pooling_layer1 = MaxPool3D(pool_size=(2, 2, 2))(conv_layer2)

    ## CONVOLUTIONAL LAYERS
    conv_layer3 = Conv3D(filters=32, kernel_size=(3, 3, 3),
                         activation='relu')(pooling_layer1)
    conv_layer4 = Conv3D(filters=64, kernel_size=(3, 3, 3),
                         activation='relu')(conv_layer3)

    ## MAXPOOLING LAYER
    pooling_layer2 = MaxPool3D(pool_size=(2, 2, 2))(conv_layer4)

    ## BATCH NORMALIZATION ON THE CONVOLUTIONAL OUTPUTS BEFORE FULLY CONNECTED LAYERS
    pooling_layer2 = BatchNormalization()(pooling_layer2)
    flatten_layer = Flatten()(pooling_layer2)

    ## FULLY CONNECTED LAYERS/ DROPOUT TO PREVENT OVERFITTING
    dense_layer1 = Dense(units=128, activation='relu')(flatten_layer)
    dense_layer1 = Dropout(0.4)(dense_layer1)  # was Dropout(1): drops 100% of units
    dense_layer2 = Dense(units=128, activation='relu')(dense_layer1)
    dense_layer2 = Dropout(0.4)(dense_layer2)  # was Dropout(1)
    dense_layer3 = Dense(units=128, activation='relu')(dense_layer2)
    dense_layer3 = Dropout(0.4)(dense_layer3)  # was Dropout(1)

    if regress is True:
        ## CREATING LINEAR MODEL
        output_layer = Dense(units=1, activation='linear')(dense_layer3)
    else:
        ## NO OUTER LAYER, JUST DENSE LAYER
        output_layer = dense_layer3

    ## DEFINE MODEL WITH INPUT AND OUTPUT LAYERS
    model = Model(inputs=input_layer, outputs=output_layer)
    return model
def c3d_model():
    """Build a C3D-style 3D CNN for 16-frame video clips.

    Each stage is one L2-regularized 3x3x3 convolution followed by a
    same-padded max pool; the first pool keeps the temporal depth intact.
    The output concatenates the class logits with every fully-connected
    activation and the flattened conv features, exposing intermediate
    representations to downstream consumers.

    Returns:
        keras Model mapping a (112, 112, 16, 3) clip to the concatenation
        of [logits, fc2, fc1, flattened conv features].
    """
    clip_shape = (112, 112, 16, 3)
    l2_weight = 0.005
    n_classes = 101

    def conv_pool(tensor, n_filters, pool):
        # One padded 3x3x3 conv + ReLU, then a same-padded max pool.
        tensor = Conv3D(n_filters, (3, 3, 3),
                        strides=(1, 1, 1),
                        padding='same',
                        activation='relu',
                        kernel_regularizer=l2(l2_weight))(tensor)
        return MaxPool3D(pool, strides=pool, padding='same')(tensor)

    inputs = Input(clip_shape)
    net = conv_pool(inputs, 64, (2, 2, 1))   # preserve temporal depth early
    net = conv_pool(net, 128, (2, 2, 2))
    net = conv_pool(net, 128, (2, 2, 2))
    net = conv_pool(net, 256, (2, 2, 2))
    net = conv_pool(net, 256, (2, 2, 2))

    feats = Flatten()(net)
    fc1 = Dense(2048, activation='relu',
                kernel_regularizer=l2(l2_weight))(feats)
    fc2 = Dense(2048, activation='relu',
                kernel_regularizer=l2(l2_weight))(fc1)
    logits = Dense(n_classes, kernel_regularizer=l2(l2_weight))(fc2)

    # Same ordering as the original: [logits, fc2, fc1, conv features].
    out = concatenate([logits, fc2, fc1, feats], axis=-1)
    return Model(inputs, out)
Пример #22
0
def build_model_2():
    """3D convolutional hourglass (encoder/decoder) compiled for MSE.

    The encoder halves the volume five times while doubling the filter
    count; the decoder mirrors it with UpSampling3D + transposed convs.
    A final Dense(1) projects each voxel to a scalar.

    NOTE(review): the first layer reads a module-level `input_shape`
    that is defined elsewhere in this file.

    Returns:
        compiled Sequential model (its summary is also printed).
    """
    model = Sequential()

    # --- Encoder: conv -> pool, doubling the filter count each stage ---
    stage_filters = (32, 64, 128, 256, 512)
    model.add(Conv3D(stage_filters[0],
                     kernel_size=(2, 2, 2),
                     padding='same',
                     activation='relu',
                     input_shape=input_shape))
    model.add(MaxPool3D(pool_size=(2, 2, 2)))
    for n_filters in stage_filters[1:]:
        model.add(Conv3D(n_filters, kernel_size=(2, 2, 2),
                         padding='same', activation='relu'))
        model.add(MaxPool3D(pool_size=(2, 2, 2)))

    # --- Decoder: upsample -> transposed conv, mirroring the encoder ---
    for n_filters in reversed(stage_filters):
        model.add(UpSampling3D(size=(2, 2, 2)))
        model.add(Deconv3D(n_filters, kernel_size=(2, 2, 2),
                           padding='same', activation='relu'))

    # Per-voxel scalar output.
    model.add(Dense(1))

    model.compile(loss='mse',
                  metrics=[
                      keras.metrics.MeanIoU(num_classes=2),
                      tf.keras.metrics.MeanSquaredError()
                  ],
                  optimizer='adam')
    model.summary()

    return model
Пример #23
0
def cnn_3d(input_shape, c, lr):
    """Small 3D CNN classifier: three conv-BN-ReLU stages, one pool, softmax.

    Args:
        input_shape: shape of one input volume (without the batch dim).
        c: number of output classes.
        lr: Adam learning rate.

    Returns:
        compiled Model (categorical cross-entropy loss, accuracy metric).
    """
    def conv_bn_relu(tensor, n_filters):
        # Anisotropic (3, 3, 25) kernel, then batch norm and ReLU.
        tensor = Conv3D(n_filters, (3, 3, 25), padding='same')(tensor)
        tensor = BatchNormalization()(tensor)
        return Activation(activation='relu')(tensor)

    inputs = Input(shape=input_shape)
    net = conv_bn_relu(inputs, 12)
    net = conv_bn_relu(net, 24)
    net = conv_bn_relu(net, 48)
    net = MaxPool3D(pool_size=(2, 2, 2), padding='valid')(net)

    net = Flatten()(net)
    net = Dense(128, activation='relu')(net)
    outputs = Dense(c, activation='softmax')(net)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer=Adam(lr=lr),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
Пример #24
0
    def build_model(self, config):
        """Assemble and compile the Conv3D + GRU classifier on self.model.

        Args:
            config: dict with 'input_shape' -- (None, rows, cols, timesteps)
                -- and 'lr', the Adam learning rate.

        Side effects:
            Adds layers to self.model, compiles it, and prints its summary.
        """
        input_shape = config['input_shape']
        # input_shape is (None, rows, cols, timesteps)
        model = self.model

        # Single-channel 3D conv over (rows, cols, timesteps).
        model.add(Conv3D(32, kernel_size=(3, 2, 4), strides=(1, 1, 2),
                         input_shape=(input_shape[1],
                                      input_shape[2],
                                      input_shape[3], 1)))
        model.add(BatchNormalization(axis=2))
        model.add(MaxPool3D(pool_size=(2, 2, 1)))
        # 1x1x1 conv collapses the channel axis before the recurrent layer.
        model.add(Conv3D(1, kernel_size=(1, 1, 1)))
        # Fold the two spatial axes into one sequence axis for the GRU.
        model.add(Reshape(target_shape=(
        model.layers[-1].output_shape[1] * model.layers[-1].output_shape[2],
        model.layers[-1].output_shape[3])))
        model.add(GRU(20))
        model.add(Dense(32))
        model.add(Dropout(0.5))
        model.add(Dense(4, activation='softmax'))
        optimizer = Adam(learning_rate=config['lr'], beta_1=0.9, beta_2=0.999,
                         amsgrad=False)
        # BUG FIX: the final layer already applies softmax, so the loss must
        # consume probabilities. from_logits=True would apply softmax a
        # second time inside the loss and silently flatten the gradients.
        model.compile(optimizer=optimizer,
                      loss=CategoricalCrossentropy(from_logits=False),
                      metrics=['accuracy'])
        model.summary()
        print("Model compiled.")
Пример #25
0
def inception_3d_v2(input):
    """Build a 3D inception block with four parallel towers.

    Note: despite its name, `input` is a shape tuple -- an Input layer is
    created internally and the concatenated tower tensor is returned
    (not a Model).

    Args:
        input: shape of the input volume (without the batch dim).

    Returns:
        tensor: channel-wise concatenation of the four tower outputs.
    """
    def conv_bn_relu(tensor, n_filters, kernel):
        # Same-padded Conv3D followed by batch norm and ReLU.
        tensor = Conv3D(n_filters, kernel, padding='same')(tensor)
        tensor = BatchNormalization()(tensor)
        return Activation(activation='relu')(tensor)

    stem = Input(shape=input)

    # Tower 1: 1x1 bottleneck then two 3x3 convolutions.
    tower1 = conv_bn_relu(stem, 12, (1, 1, 32))
    tower1 = conv_bn_relu(tower1, 24, (3, 3, 32))
    tower1 = conv_bn_relu(tower1, 48, (3, 3, 32))

    # Tower 2: 1x1 bottleneck then one 3x3 convolution.
    tower2 = conv_bn_relu(stem, 12, (1, 1, 32))
    tower2 = conv_bn_relu(tower2, 24, (3, 3, 32))

    # Tower 3: bare 1x1 projection.
    tower3 = conv_bn_relu(stem, 12, (1, 1, 32))

    # Tower 4: max pool then 1x1 projection.
    tower4 = MaxPool3D(pool_size=(3, 3, 32), strides=1, padding='same')(stem)
    tower4 = conv_bn_relu(tower4, 12, (1, 1, 32))

    return concatenate([tower1, tower2, tower3, tower4], axis=-1)
Пример #26
0
    def __init__(self, shape, xtrain, xtest, ytrain, ytest):
        """Store the dataset (labels one-hot encoded) and build the model.

        Args:
            shape: input volume shape (without the batch dim).
            xtrain, xtest: training / test volumes, stored as given.
            ytrain, ytest: integer class labels (two classes); converted
                to one-hot vectors before being stored.
        """
        self.shape = shape
        self.xtrain = xtrain
        self.xtest = xtest
        # Binary labels -> one-hot, to match the softmax/categorical head.
        self.ytrain = keras.utils.to_categorical(ytrain, 2)
        self.ytest = keras.utils.to_categorical(ytest, 2)

        # Two 4x4x4 conv layers, one pool, BN, then a wide dropout MLP head.
        inputs = Input(shape)
        net = Conv3D(filters=8,
                     kernel_size=(4, 4, 4),
                     activation='relu')(inputs)
        net = Conv3D(filters=16,
                     kernel_size=(4, 4, 4),
                     activation='relu')(net)
        net = MaxPool3D(pool_size=(2, 2, 2))(net)
        net = BatchNormalization()(net)
        net = Flatten()(net)
        net = Dense(units=5120, activation='relu')(net)
        net = Dropout(0.4)(net)
        net = Dense(units=2048, activation='relu')(net)
        net = Dropout(0.4)(net)
        net = Dense(units=2, activation='softmax')(net)

        self.model = Model(inputs=inputs, outputs=net)
Пример #27
0
def trial_3Dcnn(input_shape=(18, 18, 18, 4), class_num=2):
    """Small dilated 3D CNN classifier.

    Two dilated 3x3x3 convolutions, one 2x2x2 max pool, then a
    dropout-regularized dense head with a softmax output.

    Keyword Arguments:
        input_shape {tuple} -- shape of one input volume, channels last
            (default: {(18, 18, 18, 4)})
        class_num {int} -- number of output classes (default: {2})
    Returns:
        model -- keras Sequential model (returned uncompiled)
    """

    model = Sequential()
    model.add(
        Conv3D(32,
               kernel_size=(3, 3, 3),
               activation='relu',
               dilation_rate=2,
               input_shape=input_shape))
    model.add(Conv3D(64, (3, 3, 3), activation='relu', dilation_rate=2))
    model.add(MaxPool3D(pool_size=(2, 2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(class_num, activation='softmax'))

    return model
Пример #28
0
def VoxNet(n_classes, input_shape=(32, 32, 32), weights=None):
    """VoxNet-style 3D CNN for voxel-grid classification.

    Args:
        n_classes: number of output classes.
        input_shape: spatial shape of the voxel grid; a singleton channel
            axis is appended on the side dictated by the backend's
            image_data_format.
        weights: optional path to a weights file to load into the model.

    Returns:
        keras Model (uncompiled).
    """
    # Place the channel axis where the backend expects it.
    if K.image_data_format() == "channels_last":
        full_shape = input_shape + (1, )
    else:
        full_shape = (1, ) + input_shape

    data_input = Input(full_shape)

    net = Conv3D(32, (5, 5, 5),
                 strides=(2, 2, 2),
                 activation=LeakyReLU(0.1),
                 name="conv1")(data_input)
    net = Dropout(0.2, name="drop1")(net)
    net = Conv3D(32, (3, 3, 3), activation=LeakyReLU(0.1),
                 name="conv2")(net)
    net = MaxPool3D(name="pool2")(net)
    net = Dropout(0.3, name="drop2")(net)
    net = Flatten()(net)
    net = Dense(128, activation="relu", name="fc1")(net)
    net = Dropout(0.4, name="drop3")(net)
    net = Dense(n_classes, activation="softmax", name="fc2")(net)

    model = Model(data_input, net)

    if weights is not None:
        model.load_weights(weights)

    return model
Пример #29
0
    def model_load(shape, file):
        """Rebuild the fixed 3D-CNN architecture and load saved weights.

        Note: defined without `self`, so it only works as a static helper.

        Args:
            shape: leading input dimension; the Input layer is (shape, 64).
                NOTE(review): a 2-D input feeding Conv3D looks suspect --
                confirm the intended input shape against the training code.
            file: path to a weights file produced by this same architecture.

        Returns:
            keras Model with the weights loaded (summary printed).
        """
        input_layer = Input((shape, 64))

        conv_layer1 = Conv3D(filters=8,
                             kernel_size=(4, 4, 4),
                             activation='relu')(input_layer)
        conv_layer2 = Conv3D(filters=16,
                             kernel_size=(4, 4, 4),
                             activation='relu')(conv_layer1)

        pooling_layer1 = MaxPool3D(pool_size=(2, 2, 2))(conv_layer2)

        pooling_layer2 = BatchNormalization()(pooling_layer1)
        flatten_layer = Flatten()(pooling_layer2)

        dense_layer1 = Dense(units=5120, activation='relu')(flatten_layer)
        dense_layer1 = Dropout(0.4)(dense_layer1)
        dense_layer2 = Dense(units=2048, activation='relu')(dense_layer1)
        dense_layer2 = Dropout(0.4)(dense_layer2)
        output_layer2 = Dense(units=2, activation='softmax')(dense_layer2)

        model = Model(inputs=input_layer, outputs=output_layer2)
        model.load_weights(file)
        print("Model loaded")
        # BUG FIX: model.summary() prints itself and returns None, so the
        # old print(..., model.summary()) emitted a stray "None"; and the
        # old `self.model = model` referenced an undefined `self` (the
        # function has no self parameter), raising NameError before return.
        print("Model Summary:")
        model.summary()
        return model
Пример #30
0
def DCAE_v2_feature(weight_decay=0.0005):
    """Spectral feature extractor: two Conv3D+BN+PReLU stages plus a pool.

    Kernels span 24 positions on the leading axis and a 3x3 window on the
    other two; the final max pool aggregates 18 leading-axis positions at
    a time.

    Args:
        weight_decay: L2 penalty applied to both convolution kernels.

    Returns:
        keras Sequential model (uncompiled, no classifier head).
    """
    model = Sequential()

    # Stage 1: 24 filters over (24, 3, 3) windows, valid padding.
    model.add(Conv3D(filters=24,
                     input_shape=(224, 145, 145, 1),
                     kernel_size=(24, 3, 3),
                     strides=(1, 1, 1),
                     kernel_regularizer=regularizers.l2(l=weight_decay),
                     padding='valid',
                     name="Conv1"))
    model.add(BatchNormalization(name="BN1"))
    model.add(PReLU(name="PReLU1"))

    # Stage 2: widen to 48 filters with the same receptive field.
    model.add(Conv3D(filters=48,
                     kernel_size=(24, 3, 3),
                     strides=(1, 1, 1),
                     kernel_regularizer=regularizers.l2(l=weight_decay),
                     padding='valid',
                     name="Conv2"))
    model.add(BatchNormalization(name="BN2"))
    model.add(PReLU(name="PReLU2"))

    # Non-overlapping 18-wide pool along the leading axis only.
    model.add(MaxPool3D(pool_size=(18, 1, 1), strides=(18, 1, 1),
                        name="Pool1"))
    return model