def build_model(self, input_shape):
    """Assemble the baseline 3-D conv net and store it on ``self.model``.

    Three CONV -> AVG-POOL -> DROPOUT stages (16/32/64 filters) followed by
    a 1x1x1 sigmoid convolution producing a single-channel voxel-wise map.

    Parameters
    ----------
    input_shape : tuple
        Shape of one input volume (without the batch dimension).
    """
    # The input placeholder — think of this as the input image.
    volume_in = Input(input_shape)

    x = volume_in
    # Filter count doubles across the three CONV--POOL--ELU stages.
    for n_filters in (16, 32, 64):
        x = Conv3D(filters=n_filters, kernel_size=3, strides=(1, 1, 1),
                   padding='same', activation='elu')(x)
        x = AveragePooling3D(strides=(1, 1, 1), padding="same")(x)
        x = Dropout(self.dropout)(x)

    # Final prediction (sigmoid), one output channel per voxel.
    prediction = Conv3D(filters=1, kernel_size=1, activation='sigmoid')(x)

    # Create and store the baseline model instance.
    self.model = Model(inputs=volume_in, outputs=prediction,
                       name='BaselineModel')
def __init__(self, num_Com_layer):
    """Create the fixed stack of conv/BN/ReLU/pool layers.

    Parameters
    ----------
    num_Com_layer : int
        Stored on the instance; not consumed inside this constructor.
    """
    self.num_Com_layer = num_Com_layer
    # Two groups (128 then 64 filters), each: conv, BN, ReLU, conv, BN,
    # ReLU, average pool.  Order matters — callers apply these in sequence.
    self.Layer_List = [
        Conv3D(128, (2, 3, 3), padding='same', name='conv1'),
        BatchNormalization(name='batch_norm1'),
        Activation("relu", name='relu1'),
        Conv3D(128, (2, 3, 3), padding='same', name='conv2'),
        BatchNormalization(name='batch_norm2'),
        Activation("relu", name='relu2'),
        AveragePooling3D(2, strides=2, name='block1_pool'),
        Conv3D(64, (2, 3, 3), padding='same', name='conv3'),
        BatchNormalization(name='batch_norm3'),
        Activation("relu", name='relu3'),
        Conv3D(64, (2, 3, 3), padding='same', name='conv4'),
        BatchNormalization(name='batch_norm4'),
        Activation("relu", name='relu4'),
        AveragePooling3D(2, strides=2, name='block2_pool'),
    ]
def get_mini_model(width=128, height=128, depth=64):
    """Build a 3D convolutional neural network model.

    One max-pooled conv stage (64 filters) followed by two average-pooled
    stages (128, 256 filters), global average pooling and a 512-unit dense
    layer.  No classification head is attached here — callers add their own.
    """
    inputs = keras.Input((depth, height, width, 1))

    # First stage: conv + max pool + batch norm.
    x = Conv3D(filters=64, kernel_size=3, activation="relu")(inputs)
    x = MaxPooling3D(pool_size=2)(x)
    x = BatchNormalization()(x)

    # Remaining stages use average pooling.
    for n_filters in (128, 256):
        x = Conv3D(filters=n_filters, kernel_size=3, activation="relu")(x)
        x = AveragePooling3D(pool_size=2)(x)
        x = BatchNormalization()(x)

    x = GlobalAveragePooling3D()(x)
    x = Dense(units=512, activation="relu")(x)

    # Define the model (ends at the dense feature layer).
    return keras.Model(inputs, x)
def I2INet3D(input_shape=(64, 64, 64, 1), Nfilters=32, l2_reg=0.0):
    """I2I-Net style 3-D encoder/decoder with three deep-supervision outputs.

    Parameters
    ----------
    input_shape : tuple
        Shape of one input volume, channels last.
    Nfilters : int
        Base filter count; deeper stages use 4x, 8x and 16x this value.
    l2_reg : float
        Accepted for interface compatibility; not used in this body.

    Returns
    -------
    Model with outputs ``[s_out_3, s_out_2, s_out_1]`` — sigmoid maps at
    full, 1/2 and 1/4 resolution.
    """
    # FIX: the original called the Keras 1 helper ``merge(..., mode='concat',
    # concat_axis=4)``, which does not exist in the Keras 2 API used by every
    # other call in this function (keyword-style Conv3D).  Use the Keras 2
    # ``concatenate`` function instead; axis=4 is the channel axis here.
    from keras.layers import concatenate

    inp = Input(shape=input_shape)

    # ----- Encoder -----
    # First convolution layer
    x = Conv3D(Nfilters, (3, 3, 3), padding='same', activation='relu')(inp)
    x = Conv3D(Nfilters, (3, 3, 3), padding='same', activation='relu')(x)
    out_1 = x
    x = AveragePooling3D()(x)

    # Second convolution layer
    Nfilter2 = 4 * Nfilters
    x = Conv3D(Nfilter2, (3, 3, 3), padding='same', activation='relu')(x)
    x = Conv3D(Nfilter2, (3, 3, 3), padding='same', activation='relu')(x)
    out_2 = x
    x = AveragePooling3D()(x)

    # Third convolution layer
    Nfilter3 = 8 * Nfilters
    x = Conv3D(Nfilter3, (3, 3, 3), padding='same', activation='relu')(x)
    x = Conv3D(Nfilter3, (3, 3, 3), padding='same', activation='relu')(x)
    x = Conv3D(Nfilter3, (3, 3, 3), padding='same', activation='relu')(x)
    out_3 = x
    x = AveragePooling3D()(x)

    # Fourth convolution layer
    Nfilter4 = 16 * Nfilters
    x = Conv3D(Nfilter4, (3, 3, 3), padding='same', activation='relu')(x)
    x = Conv3D(Nfilter4, (3, 3, 3), padding='same', activation='relu')(x)
    x = Conv3D(Nfilter4, (3, 3, 3), padding='same', activation='relu')(x)
    out_4 = UpSampling3D()(x)

    # ----- Second branch (decoder with deep supervision) -----
    s = concatenate([out_3, out_4], axis=4)
    x = Conv3D(Nfilter4, (1, 1, 1), padding='same', activation='relu')(s)
    x = Conv3D(Nfilter3, (3, 3, 3), padding='same', activation='relu')(x)
    x = Conv3D(Nfilter3, (3, 3, 3), padding='same', activation='relu')(x)
    s_out_1 = Conv3D(1, (1, 1, 1), padding='same', activation='sigmoid')(x)

    # Second upsample
    x = UpSampling3D()(x)
    s = concatenate([out_2, x], axis=4)
    x = Conv3D(Nfilter3, (1, 1, 1), padding='same', activation='relu')(s)
    x = Conv3D(Nfilter2, (3, 3, 3), padding='same', activation='relu')(x)
    x = Conv3D(Nfilter2, (3, 3, 3), padding='same', activation='relu')(x)
    s_out_2 = Conv3D(1, (1, 1, 1), padding='same', activation='sigmoid')(x)

    # Final upsample
    x = UpSampling3D()(x)
    s = concatenate([out_1, x], axis=4)
    x = Conv3D(Nfilter2, (1, 1, 1), padding='same', activation='relu')(s)
    x = Conv3D(Nfilters, (3, 3, 3), padding='same', activation='relu')(x)
    x = Conv3D(Nfilters, (3, 3, 3), padding='same', activation='relu')(x)
    s_out_3 = Conv3D(1, (1, 1, 1), padding='same', activation='sigmoid')(x)

    i2i = Model(inp, [s_out_3, s_out_2, s_out_1])
    return i2i
def setup_channel_vgg(autoencoder_stage, options_dict, modelpath_and_name=None):
    """Small VGG-like 3-D classifier over (11, 13, 18, C) channel data.

    Parameters
    ----------
    autoencoder_stage : int
        When 1, the encoder conv layers are frozen (encoder+ stage).
    options_dict : dict
        Needs "dropout_for_dense" and "number_of_filters_in_input"
        (either 1 = no channel id, or 31).
    modelpath_and_name : optional
        Unused here; kept for interface compatibility.
    """
    dropout_for_dense = options_dict["dropout_for_dense"]
    number_of_filters_in_input = options_dict["number_of_filters_in_input"]
    unlock_BN_in_encoder = False
    batchnorm_for_dense = True
    # Freeze encoder layers in the encoder+ stage.
    train = autoencoder_stage != 1
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1

    inputs = Input(shape=(11, 13, 18, number_of_filters_in_input))

    # Three conv stages (32, 32, 64 filters; the last has three convs),
    # each followed by 2x2x2 average pooling.
    x = inputs
    for n_filters, n_convs in ((32, 2), (32, 2), (64, 3)):
        for _ in range(n_convs):
            x = conv_block(x, filters=n_filters, kernel_size=(3, 3, 3),
                           padding="same", trainable=train,
                           channel_axis=channel_axis,
                           BNunlock=unlock_BN_in_encoder)
        x = AveragePooling3D((2, 2, 2), padding='same')(x)

    # Dense head: 256 -> 16 -> 2-way softmax.
    x = Flatten()(x)
    x = dense_block(x, units=256, channel_axis=channel_axis,
                    batchnorm=batchnorm_for_dense, dropout=dropout_for_dense)
    x = dense_block(x, units=16, channel_axis=channel_axis,
                    batchnorm=batchnorm_for_dense, dropout=dropout_for_dense)
    outputs = Dense(2, activation='softmax', kernel_initializer='he_normal')(x)

    return Model(inputs=inputs, outputs=outputs)
def unet(params):
    """Hybrid 3-D/2-D U-Net.

    The contracting path starts in 3-D; after two pooling steps the scan
    axis is reshaped away and the remainder of the network is 2-D.  Skip
    connections from the 3-D stages are average-pooled over the scan axis
    before being concatenated into the 2-D expanding path.

    Expects ``params`` to provide n_scans, img_shape, n_filters, dropout
    and num_classes.  Returns an uncompiled Model whose softmax output is
    named "15_class".
    """
    inputs = Input(shape=(params.n_scans, params.img_shape, params.img_shape, 3))

    # contracting path
    c1 = conv3d_block(inputs, n_filters=params.n_filters * 1)
    p1 = MaxPooling3D((2, 2, 2))(c1)

    c2 = conv3d_block(p1, n_filters=params.n_filters * 2)
    # (4, 2, 2) pooling shrinks the scan axis harder than the spatial axes.
    p2 = MaxPooling3D((4, 2, 2))(c2)

    # reshape 3d image to 2d — keeps only the last three dims, so this
    # assumes the scan axis is size 1 at this point — TODO confirm n_scans.
    p2 = Reshape(target_shape=[int_shape(p2)[-3], int_shape(p2)[-2], int_shape(p2)[-1]])(p2)

    c3 = conv2d_block(p2, n_filters=params.n_filters * 4, kernel_size=3)
    p3 = MaxPooling2D((2, 2))(c3)

    c4 = conv2d_block(p3, n_filters=params.n_filters * 8, kernel_size=3)
    p4 = MaxPooling2D(pool_size=(2, 2))(c4)

    c5 = conv2d_block(p4, n_filters=params.n_filters * 8, kernel_size=3)
    p5 = MaxPooling2D(pool_size=(2, 2))(c5)
    p5 = Dropout(params.dropout)(p5)

    # bottleneck
    c7 = conv2d_block(p5, n_filters=params.n_filters * 8, kernel_size=3)

    # expanding path: transpose-conv upsample + skip concatenation
    u7 = Conv2DTranspose(params.n_filters * 8, (3, 3), strides=(2, 2), padding='same')(c7)
    u7 = concatenate([u7, c5])
    c8 = conv2d_block(u7, n_filters=params.n_filters * 8, kernel_size=3)

    u8 = Conv2DTranspose(params.n_filters * 8, (3, 3), strides=(2, 2), padding='same')(c8)
    u8 = concatenate([u8, c4])
    c9 = conv2d_block(u8, n_filters=params.n_filters * 8, kernel_size=3)

    u9 = Conv2DTranspose(params.n_filters * 4, (3, 3), strides=(2, 2), padding='same')(c9)
    u9 = concatenate([u9, c3])
    c10 = conv2d_block(u9, n_filters=params.n_filters * 4, kernel_size=3)

    u10 = Conv2DTranspose(params.n_filters * 2, (3, 3), strides=(2, 2), padding='same')(c10)
    # average pooling for concatenation: collapse c2's scan axis so the
    # 3-D skip tensor can join the 2-D path
    c2_concat = AveragePooling3D(pool_size=(int_shape(c2)[-4], 1, 1))(c2)
    concat_shape = [int_shape(c2_concat)[-3], int_shape(c2_concat)[-2], int_shape(c2_concat)[-1]]
    c2_concat = Reshape(target_shape=concat_shape)(c2_concat)
    u10 = concatenate([u10, c2_concat])
    c11 = conv2d_block(u10, n_filters=params.n_filters * 2, kernel_size=3)

    u11 = Conv2DTranspose(params.n_filters, (3, 3), strides=(2, 2), padding='same')(c11)
    # average pooling for concatenation: same trick for the c1 skip
    c1_concat = AveragePooling3D(pool_size=(int_shape(c1)[-4], 1, 1))(c1)
    concat_shape = [int_shape(c1_concat)[-3], int_shape(c1_concat)[-2], int_shape(c1_concat)[-1]]
    c1_concat = Reshape(target_shape=concat_shape)(c1_concat)
    u11 = concatenate([u11, c1_concat])
    c12 = conv2d_block(u11, n_filters=params.n_filters * 2, kernel_size=3)

    outputs = Conv2D(params.num_classes, (1, 1), activation='softmax', name="15_class")(c12)
    model = Model(inputs=inputs, outputs=[outputs])
    return model
def build_model_okg25s25(conv3d_chtype, input_shape, ndense_layers, nunits, nfilters):
    """Sequential 3-D CNN regressor with a single scalar output (MSE loss).

    Built by a function because the same model must be instantiated
    multiple times.  ``ndense_layers`` is accepted for interface
    compatibility; the dense depth is fixed at three hidden layers.
    """
    print(conv3d_chtype)
    # Choose the data format the convolutions should assume.
    if "last" in conv3d_chtype:
        K.set_image_data_format('channels_last')
    else:
        K.set_image_data_format('channels_first')

    model = Sequential()
    model.add(BatchNormalization(input_shape=input_shape))

    # Two conv/pool stages; the second doubles the filter count.
    for n_filt in (nfilters, nfilters * 2):
        model.add(Conv3D(n_filt, kernel_size=(2, 2, 2), strides=(1, 1, 1)))
        model.add(LeakyReLU())
        model.add(AveragePooling3D(pool_size=(2, 2, 2)))

    model.add(Flatten())

    # Three equally-sized dense layers, then the scalar regression output.
    for _ in range(3):
        model.add(Dense(nunits))
        model.add(LeakyReLU())
    model.add(Dense(1))

    model.compile(optimizer=optimizers.Adam(lr=0.00001),
                  loss='mse', metrics=['mse', 'mae'])
    return model
def build_2DData_model(conv3d_chtype, input_shape1, input_shape2, ndense_layers, nunits, nfilters):
    """Two-branch regressor: a 3-D CNN branch plus a dense vector branch,
    concatenated and regressed to a single value.

    Working with keras 2.2.4.
    """
    alpha = 0.3
    print(conv3d_chtype)
    if "last" in conv3d_chtype:
        K.set_image_data_format('channels_last')
    else:
        K.set_image_data_format('channels_first')

    # Branch 1: 3-D convolutional feature extractor.
    in1 = Input(shape=input_shape1)
    b1 = BatchNormalization()(in1)
    for n_filt in (nfilters, nfilters * 2):
        b1 = Conv3D(n_filt, kernel_size=(2, 2, 2), strides=(1, 1, 1))(b1)
        b1 = LeakyReLU(alpha)(b1)
        b1 = AveragePooling3D(pool_size=(2, 2, 2))(b1)
    b1 = Flatten()(b1)

    # Branch 2: three dense layers over the vector input.
    in2 = Input(shape=(input_shape2, ))
    b2 = in2
    for _ in range(3):
        b2 = Dense(nunits)(b2)
        b2 = LeakyReLU(alpha)(b2)

    # Merge and regress: four dense layers then the scalar output.
    out = Concatenate()([b1, b2])
    for _ in range(4):
        out = Dense(nunits)(out)
        out = LeakyReLU(alpha)(out)
    out = Dense(1)(out)

    fmodel = Model([in1, in2], out)
    fmodel.compile(optimizer=optimizers.Adam(lr=0.00001),
                   loss='mse', metrics=['mse', 'mae'])
    return fmodel
def model2(main_input, time):
    """3-D CNN + BiLSTM attention head.

    Input layout: (data, time, ch1, ch2, freq).  Returns the final 2-way
    softmax tensor; the caller wraps it into a Model.
    """
    act_name = 'elu'

    # Normalized copy of the raw input, concatenated back in later.
    norm_input = BatchNormalization()(main_input)

    # Conv feature map.  Note: the convolution reads the RAW input, not the
    # normalized copy.
    feat = Conv3D(kernel_size=(4, 5, 2), strides=(1, 1, 1), filters=128,
                  padding='same')(main_input)
    feat = BatchNormalization()(feat)
    feat = Activation(act_name)(feat)

    # Two smoothed variants of the feature map at different pooling shapes.
    pooled_a = AveragePooling3D(pool_size=(1, 4, 2), strides=(1, 1, 1),
                                padding='same')(feat)
    pooled_b = AveragePooling3D(pool_size=(1, 2, 4), strides=(1, 1, 1),
                                padding='same')(feat)

    merged = concatenate([norm_input, feat, pooled_a, pooled_b])
    squeezed = Conv3D(kernel_size=(1, 1, 1), strides=(1, 1, 1),
                      filters=16)(merged)
    squeezed = BatchNormalization()(squeezed)
    squeezed = Activation(act_name)(squeezed)

    # Flatten everything but time, then two stacked BiLSTMs.
    seq = Reshape((time, -1))(squeezed)
    seq = Bidirectional(LSTM(400, dropout=0.5, recurrent_dropout=0.5,
                             return_sequences=True))(seq)
    seq = Bidirectional(LSTM(400, dropout=0.5, recurrent_dropout=0.5,
                             return_sequences=True))(seq)

    # Attention weights: softmax over the time axis, reshaped back to the
    # input's 5-D layout (hard-coded to (31, 4, 5, 40) trailing dims).
    attn = Permute((2, 1))(seq)
    attn = Dense(time, activation='softmax')(attn)
    attn = Permute((2, 1), name='attention_vec')(attn)
    attn = Reshape((-1, 31, 4, 5, 40))(attn)

    # Apply the attention to the raw input and classify.
    weighted = multiply([main_input, attn])
    weighted = Flatten()(weighted)
    weighted = Dropout(0.5)(weighted)
    hidden = Dense(200)(weighted)
    return Dense(2, activation='softmax')(hidden)
def setup_channel_tiny(autoencoder_stage, options_dict, modelpath_and_name=None):
    """Tiny channel autoencoder / encoder+ classifier.

    Parameters
    ----------
    autoencoder_stage : int
        0 = dense 31 -> bottleneck -> 31 autoencoder; 1 = classifier on a
        frozen encoder whose weights are copied from ``modelpath_and_name``;
        any other value = classifier with a trainable encoder.
    options_dict : dict
        Needs "neurons_in_bottleneck".
    modelpath_and_name : str, optional
        Path of the trained autoencoder to copy encoder weights from
        (stage 1 only).

    Returns
    -------
    The assembled (uncompiled) Model.
    """
    n_bins = (11, 13, 18, 31)
    neurons_in_bottleneck = options_dict["neurons_in_bottleneck"]
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1

    if autoencoder_stage == 0:
        # Plain dense autoencoder over the 31 channel bins.
        inputs = Input(shape=(n_bins[-1],))
        x = Dense(neurons_in_bottleneck)(inputs)
        outputs = Dense(31)(x)
        model = Model(inputs=inputs, outputs=outputs)
    else:
        inputs = Input(shape=n_bins)
        # FIX: the original read ``Dense(inputs)(neurons_in_bottleneck,
        # trainable=...)`` — the layer construction and the tensor call were
        # swapped (a TypeError at build time).  The bottleneck width and
        # ``trainable`` configure the layer; the input tensor is what the
        # layer is called on.
        encoded = Dense(neurons_in_bottleneck,
                        trainable=(autoencoder_stage != 1))(inputs)

        if autoencoder_stage == 1:
            # Load weights of the encoder part from an existing autoencoder.
            encoder = Model(inputs=inputs, outputs=encoded)
            autoencoder = load_model(modelpath_and_name)
            for i, layer in enumerate(encoder.layers):
                layer.set_weights(autoencoder.layers[i].get_weights())

        filter_base = [32, 32, 64]
        train = True
        unlock_BN_in_encoder = False

        # VGG-like conv stack: 2 convs (32), pool, 2 convs (32), pool,
        # 3 convs (64), pool.
        x = conv_block(encoded, filters=filter_base[0], kernel_size=(3, 3, 3),
                       padding="same", trainable=train,
                       channel_axis=channel_axis,
                       BNunlock=unlock_BN_in_encoder)
        x = conv_block(x, filters=filter_base[0], kernel_size=(3, 3, 3),
                       padding="same", trainable=train,
                       channel_axis=channel_axis,
                       BNunlock=unlock_BN_in_encoder)
        x = AveragePooling3D((2, 2, 2), padding='same')(x)

        x = conv_block(x, filters=filter_base[1], kernel_size=(3, 3, 3),
                       padding="same", trainable=train,
                       channel_axis=channel_axis,
                       BNunlock=unlock_BN_in_encoder)
        x = conv_block(x, filters=filter_base[1], kernel_size=(3, 3, 3),
                       padding="same", trainable=train,
                       channel_axis=channel_axis,
                       BNunlock=unlock_BN_in_encoder)
        x = AveragePooling3D((2, 2, 2), padding='same')(x)

        x = conv_block(x, filters=filter_base[2], kernel_size=(3, 3, 3),
                       padding="same", trainable=train,
                       channel_axis=channel_axis,
                       BNunlock=unlock_BN_in_encoder)
        x = conv_block(x, filters=filter_base[2], kernel_size=(3, 3, 3),
                       padding="same", trainable=train,
                       channel_axis=channel_axis,
                       BNunlock=unlock_BN_in_encoder)
        x = conv_block(x, filters=filter_base[2], kernel_size=(3, 3, 3),
                       padding="same", trainable=train,
                       channel_axis=channel_axis,
                       BNunlock=unlock_BN_in_encoder)
        x = AveragePooling3D((2, 2, 2), padding='same')(x)

        # Dense head: 256 -> 16 -> 2-way softmax.
        x = Flatten()(x)
        x = dense_block(x, units=256, channel_axis=channel_axis,
                        batchnorm=True, dropout=0.2)
        x = dense_block(x, units=16, channel_axis=channel_axis,
                        batchnorm=True, dropout=0.2)
        outputs = Dense(2, activation='softmax',
                        kernel_initializer='he_normal')(x)
        model = Model(inputs=inputs, outputs=outputs)
    return model
def build_model():
    """Feature extractor over 51x51x51 single-channel volumes.

    Five Conv3D/ReLU/AvgPool stages (Keras 1 API: ``Convolution3D``,
    ``border_mode``) whose filter count doubles from 16 to 256; the result
    is flattened into a feature vector.
    """
    inp = Input(shape=(51, 51, 51, 1))

    x = inp
    # Only the first convolution pins dim_ordering explicitly, mirroring
    # the original definition.
    for stage, n_filt in enumerate((16, 32, 64, 128, 256)):
        extra = {'dim_ordering': 'tf'} if stage == 0 else {}
        x = Convolution3D(n_filt, 3, 3, 3, init='glorot_normal',
                          border_mode='same', W_regularizer=l2(0.001),
                          **extra)(x)
        x = Activation('relu')(x)
        x = AveragePooling3D(pool_size=(2, 2, 2))(x)

    feature = Flatten()(x)
    return Model(inp, feature)
def build_model(self, input_shape):
    """Build a 3-D residual U-Net and store it on ``self.model``.

    Encoder: four double-conv stages (64, 128, 256, 512 filters) with
    1x1x1 residual projections and average-pool downsampling.  Decoder:
    three upsample + skip-concat stages mirroring the encoder, each with a
    residual add.  Output: single-channel voxel-wise sigmoid.

    NOTE(review): ``UpSampling3D(..., dim_ordering="tf")`` is the Keras 1
    spelling (Keras 2 uses ``data_format``) — confirm the installed
    version accepts it.
    """
    # Define the input placeholder as a tensor with shape input_shape.
    # Think of this as your input image!
    X_input = Input(input_shape)

    # --- Encoder ---
    D1 = self.double_block(X_input, [64, 64], 3, 1)

    D2_in = AveragePooling3D(strides=(2, 2, 2), padding="same")(D1)
    D2_out = self.double_block(D2_in, [128, 128], 3, 1)
    # 1x1x1 projection so channel counts match for the residual add.
    D2_in = Conv3D(128, kernel_size=1, strides=(1, 1, 1), padding='same')(D2_in)
    D2 = Add()([D2_in, D2_out])

    D3_in = AveragePooling3D(strides=(2, 2, 2), padding="same")(D2)
    D3_out = self.double_block(D3_in, [256, 256], 3, 1)
    D3_in = Conv3D(256, kernel_size=1, strides=(1, 1, 1), padding='same')(D3_in)
    D3 = Add()([D3_in, D3_out])

    D4_in = AveragePooling3D(strides=(2, 2, 2), padding="same")(D3)
    D4_out = self.double_block(D4_in, [512, 512], 3, 1)
    D4_in = Conv3D(512, kernel_size=1, strides=(1, 1, 1), padding='same')(D4_in)
    D4 = Add()([D4_in, D4_out])

    # --- Decoder ---
    # Each stage: upsample + conv, ELU, concat with the matching encoder
    # stage, double block, then a residual add against the projected
    # upsampled tensor.
    U3_in = Conv3D(512, 2, padding='same')(UpSampling3D(size=(2, 2, 2), dim_ordering="tf")(D4))
    U3 = Activation('elu')(U3_in)
    U3 = Concatenate()([D3, U3])
    U3_out = self.double_block(U3, [256, 256], 3, 1)
    U3_in = Conv3D(256, kernel_size=1, strides=(1, 1, 1), padding='same')(U3_in)
    U3 = Add()([U3_in, U3_out])

    U2_in = Conv3D(256, 2, padding='same')(UpSampling3D(size=(2, 2, 2), dim_ordering="tf")(U3))
    U2 = Activation('elu')(U2_in)
    U2 = Concatenate()([D2, U2])
    U2_out = self.double_block(U2, [128, 128], 3, 1)
    U2_in = Conv3D(128, kernel_size=1, strides=(1, 1, 1), padding='same')(U2_in)
    U2 = Add()([U2_in, U2_out])

    U1_in = Conv3D(128, 2, padding='same')(UpSampling3D(size=(2, 2, 2), dim_ordering="tf")(U2))
    U1 = Activation('elu')(U1_in)
    U1 = Concatenate()([D1, U1])
    U1_out = self.double_block(U1, [64, 64], 3, 1)
    U1_in = Conv3D(64, kernel_size=1, strides=(1, 1, 1), padding='same')(U1_in)
    U1 = Add()([U1_in, U1_out])

    # Voxel-wise sigmoid prediction.
    pred = Conv3D(filters=1, kernel_size=1, activation='sigmoid')(U1)

    # Create instance of Baseline Model
    model = Model(inputs=X_input, outputs=pred, name='BaselineModel')
    self.model = model
def transition_SE_layer_3D(input_tensor, numFilters, compressionFactor=1.0, se_ratio=16):
    """DenseNet-style transition with an SE block.

    BN -> ReLU -> 1x1x1 conv (channel compression) -> squeeze-excitation ->
    2x average-pool downsampling.

    Parameters
    ----------
    input_tensor : 5-D tensor to transition.
    numFilters : int
        Incoming filter count.
    compressionFactor : float
        Fraction of filters kept by the 1x1x1 convolution.
    se_ratio : int
        Reduction ratio of the squeeze-excitation block.

    Returns
    -------
    (tensor, numOutPutFilters) after compression and downsampling.
    """
    numOutPutFilters = int(numFilters * compressionFactor)

    if K.image_data_format() == 'channels_last':
        bn_axis = -1
    else:
        bn_axis = 1

    x = BatchNormalization(axis=bn_axis)(input_tensor)
    x = Activation('relu')(x)
    x = Conv3D(numOutPutFilters, (1, 1, 1), strides=(1, 1, 1), padding='same',
               kernel_initializer='he_normal')(x)

    # SE Block
    x = squeeze_excitation_block_3D(x, ratio=se_ratio)
    #x = BatchNormalization(axis=bn_axis)(x)

    # Downsampling.
    # FIX: the original passed ``name=''``, which assigns the same (empty)
    # name to every instance of this layer and breaks model construction
    # when the transition is used more than once; let Keras auto-generate a
    # unique name instead.
    # NOTE(review): data_format is pinned to 'channels_last' even though
    # bn_axis adapts to the backend setting — confirm this is intended.
    x = AveragePooling3D((2, 2, 2), strides=(2, 2, 2), padding='valid',
                         data_format='channels_last')(x)
    #x = squeeze_excitation_block(x, ratio=se_ratio)

    return x, numOutPutFilters
def inception_block(x, filters=256):
    """Inception-style 3-D block.

    Four branches — 1x1, reduced 3x3, reduced double-3x3, and avg-pooled
    1x1 — plus an optional spatially-separable 5x5x5 branch, concatenated
    on the channel axis.  Branch shapes are printed for debugging.
    """
    reduced = int(filters * INCEPTION_ENABLE_DEPTHWISE_SEPARABLE_CONV_SHRINKAGE)

    branch_1x1 = conv_bn_relu(x, filters=filters, kernel_size=(1, 1, 1))

    branch_3x3 = conv_bn_relu(x, filters=reduced, kernel_size=(1, 1, 1))
    branch_3x3 = conv_bn_relu(branch_3x3, filters=filters, kernel_size=(3, 3, 3))

    branch_d3x3 = conv_bn_relu(x, filters=reduced, kernel_size=(1, 1, 1))
    branch_d3x3 = conv_bn_relu(branch_d3x3, filters=filters, kernel_size=(3, 3, 3))
    branch_d3x3 = conv_bn_relu(branch_d3x3, filters=filters, kernel_size=(3, 3, 3))

    branch_pool = AveragePooling3D(pool_size=(3, 3, 3), strides=(1, 1, 1),
                                   padding='same')(x)
    branch_pool = conv_bn_relu(branch_pool, filters=filters, kernel_size=(1, 1, 1))

    branches = [branch_1x1, branch_3x3, branch_d3x3, branch_pool]

    print('inception_block')
    print(branch_1x1.get_shape())
    print(branch_3x3.get_shape())
    print(branch_d3x3.get_shape())
    print(branch_pool.get_shape())

    if INCEPTION_ENABLE_SPATIAL_SEPARABLE_CONV:
        # 5x5x5 decomposed into three axis-aligned 1-D convolutions.
        branch_sep = conv_bn_relu(x, filters=reduced, kernel_size=(1, 1, 1))
        branch_sep = conv_bn_relu(branch_sep, filters=filters, kernel_size=(5, 1, 1))
        branch_sep = conv_bn_relu(branch_sep, filters=filters, kernel_size=(1, 5, 1))
        branch_sep = conv_bn_relu(branch_sep, filters=filters, kernel_size=(1, 1, 5))
        branches.append(branch_sep)
        print(branch_sep.get_shape())

    x = Concatenate(axis=4)(branches)
    print(x.get_shape())
    return x
def ExtractFeatures(self, X):
    """Shared convolutional feature extractor.

    Conv stem -> identity blocks -> BN/ReLU -> average pool -> dropout ->
    flattened feature vector.

    FIX: the original ended with two consecutive ``return X`` statements;
    the second was unreachable dead code and has been removed.
    """
    # Stem convolution.
    X = Conv3D(self.filter, kernel_size=(3, 3, 1),
               kernel_initializer=self.init,
               kernel_regularizer=self.regularizer,
               padding='same')(X)

    # Stage 2
    X = self.identity_block(X, stage=2, block='a')

    # Stages 3-5 (the loop count was changed from 2 to 20 per the original
    # comment; stages 3 and 4 are currently collapsed into this loop).
    for i in range(2):
        X = self.identity_block(X, stage=5, block='b' + str(i))

    # Stage 6: final normalization, activation and pooling.
    # NOTE(review): axis=3 on a 5-D tensor normalizes a spatial axis, not
    # the channel axis — confirm this is intended.
    X = BatchNormalization(axis=3)(X)
    X = Activation('relu')(X)
    X = AveragePooling3D(pool_size=(5, 5, 2))(X)

    # Output layer: regularize and flatten into a feature vector.
    X = Dropout(0.5)(X)
    X = Flatten()(X)
    return X
def cloneLayerFromLayer(pLayer):
    """Return a fresh, unweighted copy of a supported Keras layer.

    The clone is rebuilt from the layer's config, so hyper-parameters are
    preserved but trained weights are not.  Unsupported layer types yield
    ``None``.

    FIX: the MaxPooling1D branch erroneously rebuilt the layer via
    ``MaxPooling2D.from_config`` (copy-paste bug); it now clones
    MaxPooling1D.
    """
    # Convolutions:
    if isinstance(pLayer, Convolution1D):
        return Convolution1D.from_config(pLayer.get_config())
    elif isinstance(pLayer, Convolution2D):
        return Convolution2D.from_config(pLayer.get_config())
    elif isinstance(pLayer, Convolution3D):
        return Convolution3D.from_config(pLayer.get_config())
    # Max-Pooling:
    elif isinstance(pLayer, MaxPooling1D):
        return MaxPooling1D.from_config(pLayer.get_config())
    elif isinstance(pLayer, MaxPooling2D):
        return MaxPooling2D.from_config(pLayer.get_config())
    elif isinstance(pLayer, MaxPooling3D):
        return MaxPooling3D.from_config(pLayer.get_config())
    # Average-Pooling
    elif isinstance(pLayer, AveragePooling1D):
        return AveragePooling1D.from_config(pLayer.get_config())
    elif isinstance(pLayer, AveragePooling2D):
        return AveragePooling2D.from_config(pLayer.get_config())
    elif isinstance(pLayer, AveragePooling3D):
        return AveragePooling3D.from_config(pLayer.get_config())
    # Structural / activation layers:
    elif isinstance(pLayer, Flatten):
        return Flatten.from_config(pLayer.get_config())
    elif isinstance(pLayer, Merge):
        return Merge.from_config(pLayer.get_config())
    elif isinstance(pLayer, Activation):
        return Activation.from_config(pLayer.get_config())
    elif isinstance(pLayer, Dropout):
        return Dropout.from_config(pLayer.get_config())
    # Dense:
    elif isinstance(pLayer, Dense):
        return Dense.from_config(pLayer.get_config())
    return None
def Downsample(input_size):
    """One-layer Sequential model that 2x average-pools a
    (61, 73, 61, input_size) volume (Keras 1 ``border_mode`` API)."""
    pool = AveragePooling3D(pool_size=(2, 2, 2),
                            input_shape=(61, 73, 61, input_size),
                            border_mode='same')
    model = Sequential()
    model.add(pool)
    return model
def define_model(image_shape):
    """3-D ResNet binary classifier (Keras 1 API), compiled with Adam/BCE.

    Stem conv, then four groups of three residual blocks (16 -> 128
    filters; groups after the first downsample on entry), BN/ReLU,
    average pooling and a single sigmoid output.
    """
    img_input = Input(shape=image_shape)

    # Stem convolution.
    x = Convolution3D(16, 5, 5, 5, subsample=(1, 1, 1),
                      border_mode='same')(img_input)

    # Residual groups: the first keeps resolution, the rest downsample 2x
    # on their first block.
    for block_idx, n_filt in enumerate((16, 32, 64, 128)):
        entry_subsample = 1 if block_idx == 0 else 2
        x = res_block(x, nb_filters=n_filt, block=block_idx,
                      subsample_factor=entry_subsample)
        x = res_block(x, nb_filters=n_filt, block=block_idx,
                      subsample_factor=1)
        x = res_block(x, nb_filters=n_filt, block=block_idx,
                      subsample_factor=1)

    x = BatchNormalization(axis=4)(x)
    x = Activation('relu')(x)
    x = AveragePooling3D(pool_size=(4, 4, 8))(x)
    x = Flatten()(x)
    x = Dense(1, activation='sigmoid', name='predictions')(x)

    model = Model(img_input, x)
    model.compile(optimizer='adam', loss='binary_crossentropy',
                  metrics=['accuracy', 'precision', 'recall', 'fmeasure'])
    model.summary()
    return model
def inception3D(image_size, num_labels):
    """Single-inception-module 3-D classifier (Keras 1 API).

    Stem conv + max-pool, one inception module (1x1, reduced 3x3,
    reduced 5x5 and pooled-projection branches), average pooling, dropout
    and a softmax over ``num_labels`` classes.
    """
    num_channels = 1
    inputs = Input(shape=(image_size, image_size, image_size, num_channels))

    # Stem.
    net = Convolution3D(32, 5, 5, 5, subsample=(1, 1, 1), activation='relu',
                        border_mode='valid', input_shape=())(inputs)
    net = MaxPooling3D(pool_size=(2, 2, 2), strides=None,
                       border_mode='same')(net)

    # Inception module 0.
    b1 = Convolution3D(32, 1, 1, 1, subsample=(1, 1, 1), activation='relu',
                       border_mode='same')(net)
    b3_reduce = Convolution3D(32, 1, 1, 1, subsample=(1, 1, 1),
                              activation='relu', border_mode='same')(net)
    b3 = Convolution3D(64, 3, 3, 3, subsample=(1, 1, 1), activation='relu',
                       border_mode='same')(b3_reduce)
    b5_reduce = Convolution3D(16, 1, 1, 1, subsample=(1, 1, 1),
                              activation='relu', border_mode='same')(net)
    b5 = Convolution3D(32, 5, 5, 5, subsample=(1, 1, 1), activation='relu',
                       border_mode='same')(b5_reduce)
    bpool = MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1),
                         border_mode='same')(net)
    bpool_proj = Convolution3D(32, 1, 1, 1, subsample=(1, 1, 1),
                               activation='relu', border_mode='same')(bpool)
    net = merge([b1, b3, b5, bpool_proj], mode='concat', concat_axis=-1)

    # Head.
    net = AveragePooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1),
                           border_mode='valid')(net)
    net = Flatten()(net)
    net = Dropout(0.7)(net)

    # Dense and Activation are kept as separate layers so the pre-softmax
    # output can serve as a structural feature space.
    net = Dense(num_labels, activation='linear')(net)
    net = Activation('softmax')(net)

    return KM.Model(input=inputs, output=net)
def graph_embedding(tensor, n_layers, n_avg_size, n_kernel_size, t_kernel_size, n_max_size, t_max_size):
    """
    Graph embedding.

    The input is a 6-D tensor (batch, nodes, timesteps, side, side,
    channels).  Because the 3-D layers used here act on 5-D tensors, the
    node and time axes are alternately "hidden" — folded into the batch
    axis via transpose + reshape — so pooling/convolution can act on one
    of them at a time, then recovered.

    :param tensor: 6-D input tensor.
    :param n_layers: number of spatio-temporal conv layers.
    :param n_avg_size: node-axis average-pool size applied up front.
    :param n_kernel_size: node-convolution kernel size.
    :param t_kernel_size: temporal depthwise-conv kernel size.
    :param n_max_size: node-axis max-pool size per layer.
    :param t_max_size: time-axis max-pool size per layer.
    :return: embedded tensor shaped (nodes, timesteps, side, side, channels).
    """
    input_shape = K.int_shape(tensor)
    _, n_odes, n_timesteps, side_dim, side_dim, n_channels_in = input_shape

    # hide temporal dimension
    tensor = TransposeLayer((0, 2, 1, 3, 4, 5))(tensor)  # (None, 64, 100, 7, 7, 1024)
    tensor = ReshapeLayer((n_odes, side_dim, side_dim, n_channels_in))(tensor)

    # pool over node
    tensor = AveragePooling3D(pool_size=(n_avg_size, 1, 1), name='pool_n')(tensor)
    # Re-read the (shrunken) node count after pooling.
    _, n_odes, side_dim, side_dim, n_channels_in = K.int_shape(tensor)

    # recover node dimension
    tensor = ReshapeLayer((n_timesteps, n_odes, side_dim, side_dim, n_channels_in))(tensor)  # (None, 64, 100, 7, 7, 1024)
    tensor = TransposeLayer((0, 2, 1, 3, 4, 5))(tensor)  # (None, 100, 64, 7, 7, 1024)

    # hide the node dimension
    tensor = ReshapeLayer((n_timesteps, side_dim, side_dim, n_channels_in))(tensor)  # (None, 64, 7, 7, 1024)

    # 2 layers spatio-temporal conv
    for i in range(n_layers):
        layer_id = '%d' % (i + 1)

        # spatial conv (1x1x1, pointwise across channels)
        tensor = Conv3D(n_channels_in, (1, 1, 1), padding='SAME', name='conv_s_%s' % (layer_id))(tensor)  # (None, 64, 7, 7, 1024)

        # temporal conv
        tensor = DepthwiseConv1DLayer(t_kernel_size, padding='SAME', name='conv_t_%s' % (layer_id))(tensor)  # (None, 64, 7, 7, 1024)

        # node conv (helper swaps the hidden axis from time to nodes)
        tensor = __convolve_nodes(tensor, n_odes, layer_id, n_kernel_size)  # (None, 100, 7, 7, 1024)

        # activation
        tensor = BatchNormalization()(tensor)
        tensor = LeakyReLU(alpha=0.2)(tensor)

        # max_pool over nodes
        tensor = MaxPooling3D(pool_size=(n_max_size, 1, 1), name='pool_n_%s' % (layer_id))(tensor)  # (None, 100, 7, 7, 1024)
        _, n_odes, side_dim, side_dim, n_channels_in = K.int_shape(tensor)

        # get back temporal dimension and hide node dimension
        tensor = ReshapeLayer((n_timesteps, n_odes, side_dim, side_dim, n_channels_in))(tensor)  # (None, 64, 100, 7, 7, 1024)
        tensor = TransposeLayer((0, 2, 1, 3, 4, 5))(tensor)  # (None, 100, 64, 7, 7, 1024)
        tensor = ReshapeLayer((n_timesteps, side_dim, side_dim, n_channels_in))(tensor)  # (None, 64, 7, 7, 1024)

        # max_pool over time
        tensor = MaxPooling3D(pool_size=(t_max_size, 1, 1), name='pool_t_%s' % (layer_id))(tensor)  # (None, 64, 7, 7, 1024)
        _, n_timesteps, side_dim, side_dim, n_channels_in = K.int_shape(tensor)  # (None, 64, 7, 7, 1024)

    # recover nodes dimension
    tensor = ReshapeLayer((n_odes, n_timesteps, side_dim, side_dim, n_channels_in))(tensor)
    return tensor
def classifier(base_layers, input_rois, num_rois, nb_classes=21, trainable=False):
    """Faster R-CNN-style 3-D classifier head over ROI-pooled features.

    Returns ``[out_class, out_regr]`` — per-ROI class softmax and bbox
    regression TimeDistributed outputs.
    """
    # compile times on theano tend to be very high, so we use smaller ROI
    # pooling regions to workaround; the feature layout also differs by
    # backend (channels last vs channels first).
    if K.backend() == 'tensorflow':
        pooling_regions = 14
        input_shape = (num_rois, 14, 14, 14, 1024)
    elif K.backend() == 'theano':
        pooling_regions = 7
        input_shape = (num_rois, 1024, 7, 7, 7)

    x = RoiPoolingConv3D(pooling_regions, num_rois)([base_layers, input_rois])

    # replace call for readability
    # out = classifier_layers(out_roi_pool, input_shape=input_shape, trainable=True)
    if K.backend() == 'tensorflow':
        x = conv_block_td(x, 3, [512, 512, 2048], stage=5, block='a', input_shape=input_shape, strides=(2, 2), trainable=trainable)
    elif K.backend() == 'theano':
        x = conv_block_td(x, 3, [512, 512, 2048], stage=5, block='a', input_shape=input_shape, strides=(1, 1), trainable=trainable)
    x = identity_block_td(x, 3, [512, 512, 2048], stage=5, block='b', trainable=trainable)
    x = identity_block_td(x, 3, [512, 512, 2048], stage=5, block='c', trainable=trainable)

    # Collapse each ROI's 7x7x7 feature cube to a flat vector.
    out = TimeDistributed(AveragePooling3D((7, 7, 7)), name='avg_pool')(x)
    out = TimeDistributed(Flatten())(out)

    out_class = TimeDistributed(Dense(nb_classes, activation='softmax', kernel_initializer='zero'), name='dense_class_{}'.format(nb_classes))(out)
    # note: no regression target for bg class
    out_regr = TimeDistributed(Dense(4 * (nb_classes - 1), activation='linear', kernel_initializer='zero'), name='dense_regress_{}'.format(nb_classes))(out)
    return [out_class, out_regr]
def get_block(input, nc, batchnormalization, nb_filter1, nb_filter2, dropout1, dropout2, pooling, kernel_size1=3, kernel_size2=3):
    """Generic conv block that adapts to 2-D or 3-D inputs (rank 4 or 5).

    Optional pooling, then one or two conv -> [BN] -> activation ->
    [dropout] stages.  ``nc`` supplies kernel_initializer, padding and
    activation; activation 'CReLU' selects the custom CReLU layer.  When
    ``nb_filter2`` < 1 the second stage is skipped.
    """
    num_dims = len(K.int_shape(input))

    def _conv_stage(tensor, n_filters, k_size, dropout):
        # One conv -> [BN] -> activation -> [dropout] stage, rank-aware.
        if num_dims == 4:
            tensor = Conv2D(n_filters, (k_size, k_size),
                            kernel_initializer=nc['kernel_initializer'],
                            padding=nc['padding'])(tensor)
        elif num_dims == 5:
            tensor = Conv3D(n_filters, (k_size, k_size, k_size),
                            kernel_initializer=nc['kernel_initializer'],
                            padding=nc['padding'])(tensor)
        if batchnormalization:
            tensor = BatchNormalization()(tensor)
        if nc['activation'] == 'CReLU':
            tensor = CReLU()(tensor)
        else:
            tensor = Activation(nc['activation'])(tensor)
        if dropout > 0:
            tensor = Dropout(dropout)(tensor)
        return tensor

    block = input
    # Optional downsampling before the convolutions.
    if pooling == 'max' and num_dims == 4:
        block = MaxPooling2D(pool_size=(2, 2))(block)
    elif pooling == 'max' and num_dims == 5:
        block = MaxPooling3D(pool_size=(2, 2, 2))(block)
    elif pooling == 'avg' and num_dims == 4:
        block = AveragePooling2D(pool_size=(2, 2))(block)
    elif pooling == 'avg' and num_dims == 5:
        block = AveragePooling3D(pool_size=(2, 2, 2))(block)

    block = _conv_stage(block, nb_filter1, kernel_size1, dropout1)
    if nb_filter2 < 1:
        return block
    return _conv_stage(block, nb_filter2, kernel_size2, dropout2)
def get_model(input_layer, hyperparameters, visual=False):
    """Assemble the full network: alpha/beta/gama conv stages, optional
    average pooling, a dense layer with dropout, and (unless `visual`)
    a final classification layer.

    Args:
        input_layer: keras tensor the model is built on.
        hyperparameters: dict with keys 'avgpool', 'neurons', 'activation',
            'dropout', 'num_classes', 'classifier'.
        visual: when True, return the pre-classifier dropout output instead
            of the classification layer (for feature visualization).

    Returns:
        keras tensor — classifier output, or dropout output when `visual`.
    """
    # Three stacked convolutional stages.
    features = gama(
        beta(
            alpha(input_layer, hyperparameters, 0),
            hyperparameters, 1),
        hyperparameters, 2)
    # delta_out = delta(gama_out,hyperparameters)
    # epsilon_out = epsilon(delta_out,hyperparameters)
    # zita_out = zita(epsilon_out,hyperparameters)

    # Optionally reduce spatial extent before flattening.
    if hyperparameters["avgpool"]:
        features = AveragePooling3D(name="AvgPooling3D")(features)
    flat = Flatten()(features)
    # flatten = Flatten()(sixth_out)

    # Dense head with dropout.
    hidden = Dense(units=hyperparameters["neurons"][0],
                   activation=hyperparameters["activation"],
                   name="nn_layer")(flat)
    dropped = Dropout(rate=hyperparameters["dropout"], name="drop")(hidden)

    if visual:
        # Expose the pre-classifier representation.
        return dropped
    # Classification layer.
    return Dense(units=hyperparameters["num_classes"],
                 activation=hyperparameters["classifier"],
                 name=hyperparameters["classifier"] + "_layer")(dropped)
def encoded_conv_model_API():
    """Encoder half of the conv_model_API autoencoder, used to test how the
    import of pretrained weights works.

    All conv layers are frozen (trainable=False). The per-layer comments give
    the spatial output shape followed by the channel count.

    Returns:
        keras Model mapping an (11, 13, 18, 1) input to the encoded volume.
    """
    input_tensor = Input(shape=(11, 13, 18, 1))
    # 10x12x16 x 16
    h = Conv3D(filters=16, kernel_size=(2, 2, 3), padding='valid',
               activation='relu', trainable=False)(input_tensor)
    # 5x6x8 x 16
    h = AveragePooling3D((2, 2, 2), padding='valid')(h)
    # 3x4x6 x 8
    h = Conv3D(filters=8, kernel_size=(3, 3, 3), padding='valid',
               activation='relu', trainable=False)(h)
    # 2x2x4 x 4
    bottleneck = Conv3D(filters=4, kernel_size=(2, 3, 3), padding='valid',
                        activation='relu', trainable=False)(h)
    return Model(input_tensor, bottleneck)
def init_downscale_model(input_size, sc, get_output_shape=False):
    """Build a single-layer model that downscales its input by average pooling.

    Fix: the original passed `border_mode='valid'`, the Keras 1 keyword that
    was removed in Keras 2; the rest of this file uses the Keras 2 `padding=`
    keyword, so this call would raise a TypeError on a current Keras.

    Args:
        input_size: input shape tuple (without the batch dimension).
        sc: pooling window / downscale factor, passed as `pool_size`.
        get_output_shape: when True, return only the model's output shape
            (batch dimension stripped) instead of the model itself.

    Returns:
        A Sequential model, or its output-shape tuple when `get_output_shape`.
    """
    model = Sequential()
    model.add(AveragePooling3D(pool_size=sc, padding='valid',
                               input_shape=input_size))
    if get_output_shape:
        # [1:] strips the leading batch dimension.
        return model.get_output_shape_at(0)[1:]
    return model
def create_network(input_shape):
    """Build a C3D-style sequential 3D CNN for binary classification.

    Five conv blocks (with temporal average pooling up front and increasing
    dropout), followed by a flatten and a two-way softmax head.

    Args:
        input_shape: shape of one input volume (without batch dimension).

    Returns:
        An uncompiled keras Sequential model.
    """
    layers = [
        # Block 01 — halve the temporal axis first, then conv + spatial pool.
        AveragePooling3D(input_shape=input_shape, pool_size=(2, 1, 1),
                         strides=(2, 1, 1), padding='same', name='AvgPool1'),
        Conv3D(64, kernel_size=(3, 3, 3), strides=(1, 1, 1),
               activation='relu', padding='same', name='Conv1'),
        MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2),
                     padding='valid', name='MaxPool1'),
        # Block 02
        Conv3D(128, kernel_size=(3, 3, 3), strides=(1, 1, 1),
               activation='relu', padding='same', name='Conv2'),
        MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                     padding='valid', name='MaxPool2'),
        Dropout(rate=0.3),
        # Block 03
        Conv3D(256, kernel_size=(3, 3, 3), strides=(1, 1, 1),
               activation='relu', padding='same', name='Conv3A'),
        Conv3D(256, kernel_size=(3, 3, 3), strides=(1, 1, 1),
               activation='relu', padding='same', name='Conv3B'),
        MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                     padding='valid', name='MaxPool3'),
        Dropout(rate=0.4),
        # Block 04
        Conv3D(512, kernel_size=(3, 3, 3), strides=(1, 1, 1),
               activation='relu', padding='same', name='Conv4A'),
        Conv3D(512, kernel_size=(3, 3, 3), strides=(1, 1, 1),
               activation='relu', padding='same', name='Conv4B'),
        MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                     padding='valid', name='MaxPool4'),
        Dropout(rate=0.5),
        # Block 05 — final conv then dense classifier head.
        Conv3D(256, kernel_size=(3, 3, 3), strides=(1, 1, 1),
               activation='relu', padding='same', name='Conv5'),
        Flatten(),
        Dense(128, activation='relu'),
        Dropout(0.6),
        Dense(2, activation='softmax'),
    ]
    model = keras.models.Sequential()
    for layer in layers:
        model.add(layer)
    return model
def conv_block3D(self):
    """Build the 3D convolutional front-end of the network.

    Two conv/BN/LeakyReLU/max-pool stages followed by an average pool that
    collapses the temporal axis, then a reshape to a 2D feature map.

    NOTE(review): the Reshape((42, 42, 64)) hard-codes the spatial size and
    therefore assumes a specific self.input_size — confirm against callers.

    Returns:
        (inputs, reshape): the Input tensor and the reshaped output tensor.
    """
    net_in = Input(self.input_size)

    # Stage 1: conv -> BN -> LeakyReLU -> spatial max pool.
    x = Conv3D(32, (3, 5, 5), activation=None, padding='valid',
               kernel_initializer='he_normal',
               kernel_regularizer=regularizers.l2(0.1))(net_in)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = MaxPooling3D(pool_size=(1, 2, 2))(x)

    # Stage 2: same pattern with a larger filter bank.
    x = Conv3D(64, (2, 5, 5), activation=None, padding='valid',
               kernel_initializer='he_normal',
               kernel_regularizer=regularizers.l2(0.1))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = MaxPooling3D(pool_size=(1, 2, 2))(x)

    # Collapse the temporal axis, then drop it via reshape.
    x = AveragePooling3D(pool_size=(64, 1, 1), padding='same')(x)
    flat_map = Reshape((42, 42, 64))(x)
    return net_in, flat_map
def inception_module(prev_layer, ds=2):
    """GoogLeNet-style 3D inception block with five parallel branches.

    Branches: 1x1 conv; 1x1->3x3 conv; 1x1->5x5 conv; avg-pool->1x1 conv;
    max-pool->1x1 conv. All branch outputs keep the input's spatial size
    (stride 1, 'same' padding) and are concatenated on the channel axis.

    Args:
        prev_layer: keras tensor feeding the module.
        ds: integer divisor shrinking every branch's filter count (default 2).

    Returns:
        keras tensor — channel-wise concatenation of the five branches.
    """
    unit_stride = (1, 1, 1)

    # Branch 1: plain 1x1 projection.
    branch_1x1 = Conv3D(64 // ds, (1, 1, 1), strides=unit_stride,
                        padding='same')(prev_layer)
    # Branch 2: 1x1 reduction then 3x3 conv.
    branch_3x3 = Conv3D(96 // ds, (1, 1, 1), strides=unit_stride,
                        activation='relu', padding='same')(prev_layer)
    branch_3x3 = Conv3D(128 // ds, (3, 3, 3), strides=unit_stride,
                        padding='same')(branch_3x3)
    # Branch 3: 1x1 reduction then 5x5 conv.
    branch_5x5 = Conv3D(96 // ds, (1, 1, 1), strides=unit_stride,
                        activation='relu', padding='same')(prev_layer)
    branch_5x5 = Conv3D(128 // ds, (5, 5, 5), strides=unit_stride,
                        padding='same')(branch_5x5)
    # Branch 4: average pool then 1x1 projection.
    branch_avg = AveragePooling3D((3, 3, 3), strides=unit_stride,
                                  padding='same')(prev_layer)
    branch_avg = Conv3D(32 // ds, (1, 1, 1), strides=unit_stride,
                        padding='same')(branch_avg)
    # Branch 5: max pool then 1x1 projection.
    branch_max = MaxPooling3D((3, 3, 3), strides=unit_stride,
                              padding='same')(prev_layer)
    branch_max = Conv3D(32 // ds, (1, 1, 1), strides=unit_stride,
                        padding='same')(branch_max)

    return concatenate(
        [branch_1x1, branch_3x3, branch_5x5, branch_avg, branch_max],
        axis=-1)
def _transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4):
    '''DenseNet transition block: BatchNorm, ReLU, 1x1x1 Conv3D with optional
    compression, then AveragePooling3D down-sampling.

    Args:
        ip: keras tensor
        nb_filter: number of filters before compression
        compression: calculated as 1 - reduction. Reduces the number of
            feature maps in the transition block.
        weight_decay: weight decay factor (l2 on the conv kernel)

    Returns:
        keras tensor, after applying batch_norm, relu, 1x1x1 conv, avg-pool
    '''
    # Channel axis depends on the backend's image data format.
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
    with K.name_scope('transition_block'):
        x = BatchNormalization(axis=concat_axis, epsilon=1e-5, momentum=0.1)(ip)
        x = Activation('relu')(x)
        # 1x1x1 conv compresses channels to nb_filter * compression.
        x = Conv3D(int(nb_filter * compression), (1, 1, 1), kernel_initializer='he_normal', padding='same', use_bias=False, kernel_regularizer=l2(weight_decay))(x)
        # NOTE(review): stride is (2, 2, 1) while pool_size is (2, 2, 2) — the
        # third axis is not down-sampled. Possibly intentional for thin
        # volumes; confirm it is not a typo for strides=(2, 2, 2).
        x = AveragePooling3D((2, 2, 2), strides=(2, 2, 1))(x)
    return x
def _train_test_split(img_list):
    """Randomly partition `img_list` into (train, test) using `trainPerc`.

    Fixes the original off-by-one sampling: `np.random.permutation(range(1,
    len(x)))` never produced index 0 and only yielded len(x)-1 indices, and
    the slices `r[1:lastTrainPos]` / `r[(lastTrainPos + 1):]` each silently
    dropped one more sample, so train and test did not partition the data.
    """
    order = np.random.permutation(len(img_list))
    n_train = int(round(len(order) * trainPerc))
    return img_list[order[:n_train]], img_list[order[n_train:]]


def execute(imgListTP, imgListFP):
    """Train and evaluate a small 3D CNN separating true from false positives.

    Splits the TP and FP candidate volumes into train/test sets, labels TP
    as 1 and FP as 0, fits a binary classifier, and returns its test score.

    Args:
        imgListTP: array of true-positive candidate volumes.
        imgListFP: array of false-positive candidate volumes.

    Returns:
        The [loss, accuracy] result of model.evaluate on the test split.
    """
    candidates_TP_train, candidates_TP_test = _train_test_split(imgListTP)
    candidates_FP_train, candidates_FP_test = _train_test_split(imgListFP)

    x_train = np.concatenate((candidates_TP_train, candidates_FP_train))
    x_test = np.concatenate((candidates_TP_test, candidates_FP_test))
    x_train = reshapeForModelInput(x_train)
    x_test = reshapeForModelInput(x_test)
    print(x_train.shape)

    # Labels: 1 for true positives, 0 for false positives.
    y_train = np.concatenate((np.ones(len(candidates_TP_train)),
                              np.zeros(len(candidates_FP_train))))
    y_test = np.concatenate((np.ones(len(candidates_TP_test)),
                             np.zeros(len(candidates_FP_test))))

    # Model creation: three tiny conv layers, average pool, single-unit head.
    model = Sequential()
    model.add(Conv3D(1, (3, 3, 3), activation='relu',
                     input_shape=(11, 11, 11, 1)))
    model.add(Conv3D(1, (3, 3, 3), activation='relu'))
    model.add(Conv3D(1, (3, 3, 3), activation='relu'))
    model.add(AveragePooling3D((2, 2, 2)))
    model.add(Flatten())
    # Fix: binary_crossentropy expects probabilities in (0, 1); the original
    # linear Dense(1) output produced unbounded values.
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer='rmsprop', loss='binary_crossentropy',
                  metrics=['accuracy'])

    model.fit(x_train, y_train, epochs=32, batch_size=32)
    score = model.evaluate(x_test, y_test, batch_size=20)
    return score