def create_autoencoder(img_px_size=32, slice_count=8):
    """Build a 3D convolutional autoencoder for (img_px_size, img_px_size, slice_count) volumes.

    Args:
        img_px_size: in-plane size of the cubic input (default assumes 32x32).
        slice_count: number of slices along the third spatial axis (default 8).

    Returns:
        (autoencoder, encoder): the full reconstruction model and the encoder
        sub-model that maps inputs to the compressed representation.
    """
    tf.keras.backend.set_image_data_format("channels_last")
    input_shape = (img_px_size, img_px_size, slice_count, 1)
    input_img = Input(shape=input_shape)

    # encoder portion
    x = Conv3D(16, (3, 3, 3), activation="relu", padding="same")(input_img)
    x = MaxPooling3D((2, 2, 2), padding="same")(x)
    x = Conv3D(8, (3, 3, 3), activation="relu", padding="same")(x)
    x = MaxPooling3D((2, 2, 2), padding="same")(x)
    x = Conv3D(8, (3, 3, 3), activation="relu", padding="same")(x)
    encoded = MaxPooling3D((2, 2, 2), padding="same")(x)
    # at this point the representation is compressed (4*4*1*8 for the default input)

    # decoder portion
    x = Conv3D(8, (3, 3, 3), activation="relu", padding="same")(encoded)
    # BUG FIX: upsample the conv output `x`, not `encoded` — the original
    # passed `encoded` here, silently discarding the first decoder Conv3D.
    x = UpSampling3D((2, 2, 2))(x)
    x = Conv3D(8, (3, 3, 3), activation="relu", padding="same")(x)
    x = UpSampling3D((2, 2, 2))(x)
    x = Conv3D(16, (3, 3, 3), activation="relu", padding="same")(x)
    x = UpSampling3D((2, 2, 2))(x)
    decoded = Conv3D(1, (3, 3, 3), activation="sigmoid", padding="same")(x)

    autoencoder = Model(input_img, decoded)
    encoder = Model(input_img, encoded)
    return autoencoder, encoder
def MINI_MTL(inputs, filters, numClasses, i):
    """Mini multi-task head at pyramid level i: edge and mask branches over RA features.

    Returns (fused_trunk_features, edge_prediction, mask_prediction).
    """
    edge = RA(inputs, inputs, filters)
    mask = RA(inputs, inputs, filters)

    edge = Conv3D(filters, (3, 3, 3), padding='same')(edge)
    edge = BatchNormalization(axis=-1)(edge)
    edge = Activation('relu')(edge)

    mask = Conv3D(filters, (3, 3, 3), padding='same')(mask)
    mask = BatchNormalization(axis=-1)(mask)
    mask = Activation('relu')(mask)

    scale = pow(2, i)

    # edge head (background class excluded: numClasses - 1 channels)
    out_edge = Conv3D(numClasses - 1, (1, 1, 1), padding='same')(edge)
    out_edge = UpSampling3D(scale)(out_edge)
    out_edge = Softmax(axis=-1, dtype='float32', name='out_edge_{}'.format(i))(out_edge)

    # mask head
    out_mask = Conv3D(numClasses, (1, 1, 1), padding='same')(mask)
    out_mask = UpSampling3D(scale)(out_mask)
    out_mask = Softmax(axis=-1, dtype='float32', name='out_mask_{}'.format(i))(out_mask)

    # fuse both branch feature maps back into one trunk feature map
    fused = Concatenate()([mask, edge])
    fused = Conv3D(filters, (1, 1, 1), padding='same')(fused)
    return fused, out_edge, out_mask
def build_MINI_MTL(input_shape, filters, numClasses, i):
    """Standalone mini multi-task model: edge + mask heads over RA-refined features.

    Returns (mtl_model, fused_trunk_features).
    """
    input_layer = Input(shape=(input_shape, input_shape, input_shape, filters))

    edge = RA(input_layer, input_layer, filters)
    mask = RA(input_layer, input_layer, filters)

    edge = Conv3D(filters, (3, 3, 3), padding='same')(edge)
    edge = BatchNormalization(axis=-1)(edge)
    edge = Activation('relu')(edge)

    mask = Conv3D(filters, (3, 3, 3), padding='same')(mask)
    mask = BatchNormalization(axis=-1)(mask)
    mask = Activation('relu')(mask)

    scale = pow(2, i)

    out_edge = Conv3D(numClasses, (1, 1, 1), padding='same')(edge)
    out_edge = Softmax(axis=-1)(out_edge)
    out_edge = UpSampling3D(scale, name='out_edge_{}'.format(i))(out_edge)

    out_mask = Conv3D(numClasses, (1, 1, 1), padding='same')(mask)
    out_mask = Softmax(axis=-1)(out_mask)
    out_mask = UpSampling3D(scale, name='out_mask_{}'.format(i))(out_mask)

    fused = Concatenate()([mask, edge])
    fused = Conv3D(filters, (1, 1, 1), padding='same')(fused)

    mtl_model = Model(inputs=[input_layer], outputs=[out_edge, out_mask])
    return mtl_model, fused
def vae_part(x, dim=128):
    """VAE bottleneck: encode x to a latent Gaussian, sample, decode back to a volume.

    Per the original shape annotations the incoming feature map is 256ch at
    20x24x16 — TODO confirm against the caller.

    Returns:
        (decoded_features, z_mean, z_log_var).
    """
    x = Conv3D(filters=16, kernel_size=(3, 3, 3), strides=(2, 2, 2), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    # BUG FIX: Flatten was commented out, which made the Dense layers act
    # per-voxel and broke the Reshape below (needs exactly 16*10*12*8 = 15360
    # elements, but the sampled latent only has `dim`).
    x = Flatten()(x)
    x = Dense(256)(x)
    z_mean = Dense(dim)(x)
    z_log_var = Dense(dim)(x)
    # reparameterization trick: sample z ~ N(z_mean, exp(z_log_var))
    x = Lambda(sampling, output_shape=(dim,))([z_mean, z_log_var])
    # BUG FIX: project the latent back up to 15360 so the Reshape is valid
    x = Dense(15360)(x)
    x = Reshape((16, 10, 12, 8))(x)
    x = Conv3D(filters=128, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='same')(x)
    x = Activation('relu')(x)
    # four 2x upsampling stages back toward the input resolution
    x = UpSampling3D(size=(2, 2, 2))(x)
    x = UpSampling3D(size=(2, 2, 2))(x)
    x = UpSampling3D(size=(2, 2, 2))(x)
    x = UpSampling3D(size=(2, 2, 2))(x)
    return x, z_mean, z_log_var
def local_network_function(input_img):
    """Small 3D U-Net body: two-stage encoder, skip-connected decoder, linear 1-channel output."""
    # encoder
    enc1 = Conv3D(8, (3, 3, 3), activation="relu", padding="same")(input_img)
    down1 = MaxPool3D(pool_size=(2, 2, 2))(enc1)
    enc2 = Conv3D(16, (3, 3, 3), activation="relu", padding="same")(down1)
    down2 = MaxPool3D(pool_size=(2, 2, 2))(enc2)
    bottleneck = Conv3D(32, (3, 3, 3), activation="relu", padding="same")(down2)

    # decoder — zero-padding presumably compensates for odd input dims lost
    # in pooling so the concat shapes line up (TODO confirm with input sizes)
    dec1 = UpSampling3D((2, 2, 2))(bottleneck)
    dec1 = ZeroPadding3D(padding=((0, 1), (0, 1), (0, 1)))(dec1)
    dec1 = Concatenate()([dec1, enc2])
    dec1 = Conv3D(16, (3, 3, 3), activation="relu", padding="same")(dec1)

    dec2 = UpSampling3D((2, 2, 2))(dec1)
    dec2 = ZeroPadding3D(padding=((0, 1), (0, 1), (0, 1)))(dec2)
    dec2 = Concatenate()([dec2, enc1])
    dec2 = Conv3D(8, (3, 3, 3), activation="relu", padding="same")(dec2)

    return Conv3D(1, (1, 1, 1), activation=None, padding="same")(dec2)
def autodecoder(latent):
    """Decoder: alternating transposed convs and fixed-size upsampling stages.

    NOTE(review): relies on `stride` and `padding` from the enclosing scope —
    confirm they are defined wherever this function is used.
    """
    x = Conv3DTranspose(data_format='channels_last', filters=1, kernel_size=(2, 2, 2),
                        strides=stride, activation='relu', padding=padding)(latent)
    x = UpSampling3D(data_format='channels_last', size=(1, 1, 2))(x)
    x = Conv3DTranspose(data_format='channels_last', filters=16, kernel_size=(2, 2, 2),
                        strides=stride, activation='relu', padding=padding)(x)
    x = UpSampling3D(data_format='channels_last', size=(3, 3, 2))(x)
    x = Conv3DTranspose(data_format='channels_last', filters=64, kernel_size=(3, 3, 3),
                        strides=stride, activation='relu', padding=padding)(x)
    x = UpSampling3D(data_format='channels_last', size=(3, 2, 2))(x)
    # final transposed conv keeps ReLU; other activations were tried and rejected
    decoded = Conv3DTranspose(data_format='channels_last', filters=32, kernel_size=(3, 3, 3),
                              strides=stride, activation='relu', padding=padding,
                              name='decode')(x)
    return decoded
def build_res_atten_unet_3d(input_shape, filter_num=8, merge_axis=-1, pool_size=(2, 2, 2), up_size=(2, 2, 2)):
    """Residual attention U-Net for 3D volumes with a single-channel sigmoid output."""
    data = Input(shape=input_shape)

    # stem
    conv1 = Conv3D(filter_num * 4, 3, padding='same')(data)
    conv1 = BatchNormalization()(conv1)
    conv1 = Activation('relu')(conv1)
    pool = MaxPooling3D(pool_size=pool_size)(conv1)

    # encoder: residual blocks with doubling widths
    res1 = residual_block(pool, output_channels=filter_num * 4)
    pool1 = MaxPooling3D(pool_size=pool_size)(res1)
    res2 = residual_block(pool1, output_channels=filter_num * 8)
    pool2 = MaxPooling3D(pool_size=pool_size)(res2)
    res3 = residual_block(pool2, output_channels=filter_num * 16)
    pool3 = MaxPooling3D(pool_size=pool_size)(res3)
    res4 = residual_block(pool3, output_channels=filter_num * 32)
    pool4 = MaxPooling3D(pool_size=pool_size)(res4)

    # bottleneck
    bottom = residual_block(pool4, output_channels=filter_num * 64)
    bottom = residual_block(bottom, output_channels=filter_num * 64)

    # decoder with attention-gated skip connections
    atb5 = attention_block(res4, encoder_depth=1, name='atten1')
    up1 = UpSampling3D(size=up_size)(bottom)
    dec = residual_block(concatenate([up1, atb5], axis=merge_axis),
                         output_channels=filter_num * 32)

    atb6 = attention_block(res3, encoder_depth=2, name='atten2')
    up2 = UpSampling3D(size=up_size)(dec)
    dec = residual_block(concatenate([up2, atb6], axis=merge_axis),
                         output_channels=filter_num * 16)

    atb7 = attention_block(res2, encoder_depth=3, name='atten3')
    up3 = UpSampling3D(size=up_size)(dec)
    dec = residual_block(concatenate([up3, atb7], axis=merge_axis),
                         output_channels=filter_num * 8)

    atb8 = attention_block(res1, encoder_depth=4, name='atten4')
    up4 = UpSampling3D(size=up_size)(dec)
    dec = residual_block(concatenate([up4, atb8], axis=merge_axis),
                         output_channels=filter_num * 4)

    # final upsample back to input resolution and fuse with the stem features
    up = UpSampling3D(size=up_size)(dec)
    merged = concatenate([up, conv1], axis=merge_axis)
    conv9 = Conv3D(filter_num * 4, 3, padding='same')(merged)
    conv9 = BatchNormalization()(conv9)
    conv9 = Activation('relu')(conv9)

    output = Conv3D(1, 3, padding='same', activation='sigmoid')(conv9)
    return Model(data, output)
def create_experimental_autoencoder(img_px_size=64, slice_count=8):
    """Build an experimental 3D autoencoder with a 500-dim dense bottleneck.

    Assumes the default (64, 64, 8) input — the decoder's Reshape((8, 8, 1, 50))
    is hard-wired to that geometry.

    Returns:
        (autoencoder, encoder): full model and the encoder sub-model.
    """
    tf.keras.backend.set_image_data_format("channels_last")
    init = tf.keras.initializers.GlorotNormal()

    def conv20(tensor, kernel):
        # all hidden convs use 20 filters, ReLU and GlorotNormal init
        return Conv3D(20, kernel, activation="relu", padding="same",
                      kernel_initializer=init)(tensor)

    input_img = Input(shape=(img_px_size, img_px_size, slice_count, 1))

    # encoder
    x = conv20(input_img, (5, 5, 5))
    x = MaxPooling3D((2, 2, 2), padding="same")(x)
    x = conv20(x, (3, 3, 3))
    x = MaxPooling3D((2, 2, 2), padding="same")(x)
    x = conv20(x, (3, 3, 3))
    x = MaxPooling3D((2, 2, 2), padding="same")(x)
    x = Flatten()(x)
    encoded = Dense(500, activation="relu", kernel_initializer=init)(x)
    # representation compressed to 500 dims here

    # decoder
    x = Dense(3200, activation="relu", kernel_initializer=init)(encoded)
    x = Reshape((8, 8, 1, 50))(x)
    x = conv20(x, (3, 3, 3))
    x = UpSampling3D((2, 2, 2))(x)
    x = conv20(x, (3, 3, 3))
    x = UpSampling3D((2, 2, 2))(x)
    x = conv20(x, (5, 5, 5))
    x = UpSampling3D((2, 2, 2))(x)
    decoded = Conv3D(1, (3, 3, 3), activation="sigmoid", padding="same",
                     kernel_initializer=init)(x)

    autoencoder = Model(input_img, decoded)
    encoder = Model(input_img, encoded)
    return autoencoder, encoder
def simple_block_3d(input, number_of_filters, downsample=False, upsample=False, convolution_kernel_size=(3, 3, 3), deconvolution_kernel_size=(2, 2, 2), weight_decay=0.0, dropout_rate=0.0):
    """Pre-activation residual block (BN -> ReLU -> conv) with optional down/up sampling.

    The shortcut path is projected/resampled to match the main path before the
    `skip_connection` merge.
    """
    n_out = number_of_filters

    out = BatchNormalization()(input)
    out = ThresholdedReLU(theta=0)(out)

    if downsample:
        out = MaxPooling3D(pool_size=(2, 2, 2))(out)

    out = Conv3D(filters=number_of_filters,
                 kernel_size=convolution_kernel_size,
                 padding='same',
                 kernel_regularizer=regularizers.l2(weight_decay))(out)

    if upsample:
        out = Conv3DTranspose(filters=number_of_filters,
                              kernel_size=deconvolution_kernel_size,
                              padding='same',
                              kernel_initializer=initializers.he_normal(),
                              kernel_regularizer=regularizers.l2(weight_decay))(out)
        out = UpSampling3D(size=(2, 2, 2))(out)

    if dropout_rate > 0.0:
        out = Dropout(rate=dropout_rate)(out)

    # adapt the shortcut so it has the same shape as the main path
    shortcut = input
    if downsample:
        shortcut = Conv3D(filters=n_out, kernel_size=(1, 1, 1),
                          strides=(2, 2, 2), padding='same')(shortcut)
    elif upsample:
        shortcut = Conv3DTranspose(filters=n_out, kernel_size=(1, 1, 1),
                                   padding='same')(shortcut)
        shortcut = UpSampling3D(size=(2, 2, 2))(shortcut)
    elif number_of_filters != n_out:
        # NOTE(review): n_out == number_of_filters by construction, so this
        # branch never fires; kept for behavioral parity with the original.
        shortcut = Conv3D(filters=n_out, kernel_size=(1, 1, 1),
                          padding='same')(shortcut)

    return (skip_connection(shortcut, out))
def get_simple_3d_unet(input_shape):
    """Three-level 3D U-Net (channels_first) with a single-channel sigmoid output."""
    def down(tensor, name):
        return MaxPooling3D((2, 2, 2), strides=(2, 2, 2), padding="same",
                            name=name, data_format='channels_first')(tensor)

    def up(tensor, skip):
        # upsample and fuse with the matching encoder feature map
        return concatenate(
            [UpSampling3D(data_format='channels_first')(tensor), skip], axis=1)

    img_input = Input(input_shape)

    # encoder
    c1 = conv_block_simple_3D(img_input, 32, "conv1_1")
    c1 = conv_block_simple_3D(c1, 32, "conv1_2")
    c2 = conv_block_simple_3D(down(c1, "pool1"), 64, "conv2_1")
    c2 = conv_block_simple_3D(c2, 64, "conv2_2")
    c3 = conv_block_simple_3D(down(c2, "pool2"), 128, "conv3_1")
    c3 = conv_block_simple_3D(c3, 128, "conv3_2")
    c4 = conv_block_simple_3D(down(c3, "pool3"), 256, "conv4_1")
    c4 = conv_block_simple_3D(c4, 256, "conv4_2")
    c4 = conv_block_simple_3D(c4, 256, "conv4_3")

    # decoder
    c5 = conv_block_simple_3D(up(c4, c3), 128, "conv5_1")
    c5 = conv_block_simple_3D(c5, 128, "conv5_2")
    c6 = conv_block_simple_3D(up(c5, c2), 64, "conv6_1")
    c6 = conv_block_simple_3D(c6, 64, "conv6_2")
    c7 = conv_block_simple_3D(up(c6, c1), 32, "conv7_1")
    c7 = conv_block_simple_3D(c7, 32, "conv7_2")
    c7 = SpatialDropout3D(rate=0.2, data_format='channels_first')(c7)

    prediction = Conv3D(1, (1, 1, 1), activation="sigmoid", name="prediction",
                        data_format='channels_first')(c7)
    return Model(img_input, prediction)
def decoder(x, from_encoder):
    """Decoder path: upsample, fuse the encoder skip, refine with a residual block.

    `from_encoder` is indexed [0..2] from shallow to deep; fusion happens deepest-first.
    Note the first stage upsamples all three axes, the later two only in-plane.
    """
    stages = [
        ((2, 2, 2), from_encoder[2], [256, 256]),
        ((2, 2, 1), from_encoder[1], [128, 128]),
        ((2, 2, 1), from_encoder[0], [64, 64]),
    ]
    path = x
    for size, skip, filters in stages:
        path = UpSampling3D(size=size)(path)
        path = concatenate([path, skip], axis=4)
        path = res_block(path, filters, [(1, 1, 1), (1, 1, 1)])
    return path
def make_dnn(**kwargs):
    """Multi-view 2D encoder -> dense bottleneck -> 3D transposed-conv decoder.

    Maps (PROJ_SHAPE[0], PROJ_SHAPE[1], NUM_VIEWS) projections to a (64, 64, 64) volume.
    """
    inputs = Input(shape=(PROJ_SHAPE[0], PROJ_SHAPE[1], NUM_VIEWS))
    x = inputs

    # encoder: repeated conv -> pool -> dropout
    for filters, kernel in [(32, 7), (64, 3), (64, 3), (128, 3), (128, 3), (256, 3)]:
        x = Conv2D(filters, kernel, activation='relu', padding='same')(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Dropout(0.2)(x)
    x = Flatten()(x)

    # dense bottleneck reshaped into a small seed volume (6*6*6*20 = 4320)
    x = Dense(4320, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Reshape((6, 6, 6, 20))(x)

    # decoder
    x = Dropout(0.2)(x)
    x = UpSampling3D(size=(2, 2, 2))(x)
    x = Dropout(0.2)(x)
    x = Conv3DTranspose(256, 3, activation='relu', padding='valid')(x)
    x = Dropout(0.2)(x)
    x = UpSampling3D(size=(2, 2, 2))(x)
    x = Conv3DTranspose(128, 3, activation='relu', padding='valid')(x)
    x = Dropout(0.2)(x)
    x = UpSampling3D(size=(2, 2, 2))(x)
    x = Conv3DTranspose(64, 3, activation='relu', padding='valid')(x)
    x = Dropout(0.2)(x)
    x = Conv3DTranspose(1, 3, activation='sigmoid', padding='valid')(x)
    outputs = Reshape((64, 64, 64))(x)

    return Model(inputs=inputs, outputs=outputs)
def __init__(self, num_objs=35, num_chan=128, mask_size=64, name="Mask_Regression"):
    """Build the per-object mask regression net: 1x1 seed upsampled to mask_size.

    Raises:
        ValueError: if mask_size is not a power of 2.
    """
    super(Mask_regression, self).__init__()
    self.num_objs = num_objs
    self.output_dim = 1

    stack = [
        Input(shape=(num_objs, num_chan)),
        Reshape((num_objs, 1, 1, num_chan)),
    ]
    size = 1
    # double the spatial size until it reaches mask_size
    while size < mask_size:
        stack.append(UpSampling3D(size=(1, 2, 2)))
        stack.append(BatchNormalization())
        stack.append(Conv3D(num_chan, kernel_size=(1, 3, 3),
                            padding='same', activation='relu'))
        size *= 2
    if size != mask_size:
        raise ValueError('Mask size must be a power of 2')

    stack.append(Conv3D(self.output_dim, kernel_size=(1, 1, 1), activation="sigmoid"))
    stack.append(Reshape((num_objs, mask_size, mask_size)))

    self.layers = stack
    self.model = Sequential(stack)
def CHP(n_channelsX=1, n_channelsY=1):
    """Residual 3D CNN: downsample once, six residual blocks, upsample, linear output."""
    if K.image_data_format() == 'channels_first':
        init_shape = (n_channelsX, None, None, None)
        channel_axis = 1
    else:
        init_shape = (None, None, None, n_channelsX)
        channel_axis = -1

    def conv3(tensor, filters, strides):
        return Conv3D(filters=filters, kernel_size=(3, 3, 3), strides=strides,
                      use_bias=False, padding='same')(tensor)

    in1 = Input(shape=init_shape, name='in1')

    x = conv3(in1, 16, 1)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    x = conv3(x, 32, 2)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    # six residual refinement blocks at half resolution
    for _ in range(6):
        y = conv3(x, 32, 1)
        y = BatchNormalization(axis=channel_axis)(y)
        y = Activation('relu')(y)
        y = conv3(y, 32, 1)
        y = BatchNormalization(axis=channel_axis)(y)
        x = Add()([x, y])

    x = UpSampling3D((2, 2, 2))(x)
    x = conv3(x, 16, 1)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    x = conv3(x, n_channelsY, 1)
    output = Activation('linear')(x)

    return Model(in1, output)
def up_sampling_with_norm(Input1, Input2, target_size, kernel_size=3, padding="same", activation="relu", stride=1):
    """Upsample Input1, concatenate with the skip tensor Input2, apply two conv+BN stages.

    Args:
        Input1: tensor from the previous (deeper) layer.
        Input2: encoder tensor to concatenate as a skip connection.
        target_size: number of filters for both convolutions.
    """
    upsampled = UpSampling3D((2, 2, 2))(Input1)
    merged = Concatenate()([upsampled, Input2])

    out = merged
    for _ in range(2):
        out = Conv3D(target_size, kernel_size, padding=padding, strides=stride,
                     activation=activation, kernel_initializer='he_normal')(out)
        out = BatchNormalization()(out)
    return out
def UNet3DPatch(shape, weights=None):
    """Patch-based 3D U-Net compiled for binary segmentation (BCE, AUC + dice)."""
    encoder_filters = np.array([8, 16, 32, 32])
    mid_filters = 32
    decoder_filters = np.array([64, 32, 16, 8])
    bottom_filters = 4

    inputs = Input(shape)
    skips = []

    # encoder: conv blocks with pooling, remembering skips
    x = inputs
    for n_filt in encoder_filters:
        block = UNet3DBlock(x, layers=2, filters=n_filt)
        x = MaxPooling3D(pool_size=(2, 2, 2))(block)
        skips.append(block)

    x = UNet3DBlock(x, layers=1, filters=mid_filters)

    # decoder: conv first, then upsample and fuse the matching skip
    for n_filt in decoder_filters:
        x = UNet3DBlock(x, layers=2, filters=n_filt)
        x = UpSampling3D(size=(2, 2, 2))(x)
        x = concatenate([skips.pop(), x])

    x = UNet3DBlock(x, layers=2, filters=bottom_filters)
    outputs = Conv3D(1, 1, activation='sigmoid')(x)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer=Adam(learning_rate=1e-4),
                  loss='binary_crossentropy',
                  metrics=[AUC(), dice_coef])
    model.summary()
    return model
def attention_block3D(x, gating, inter_shape):
    """Additive attention gate for 3D U-Net skip connections.

    Gates the skip tensor `x` by a sigmoid attention map computed from `x`
    and the coarser `gating` signal, then projects back to x's channel count.

    Returns:
        Batch-normalized, attention-weighted feature map with shape_x[4] channels.
    """
    shape_x = k.int_shape(x)

    # bring x down to the gating signal's spatial resolution
    theta_x = Conv3D(filters=inter_shape, kernel_size=3, strides=2, padding='same')(x)
    # project the gating signal to inter_shape channels
    phi_g = Conv3D(filters=inter_shape, kernel_size=1, strides=1, padding='same')(gating)

    concat_xg = add([phi_g, theta_x])
    act_xg = Activation('relu')(concat_xg)
    psi = Conv3D(filters=1, kernel_size=1, padding='same')(act_xg)
    sigmoid_xg = Activation('sigmoid')(psi)

    # upsample the attention map back to x's resolution and broadcast over channels
    upsample_psi = UpSampling3D(size=2)(sigmoid_xg)
    upsample_psi = repeat_elem(upsample_psi, shape_x[4], axs=4)

    # BUG FIX: this line was commented out, leaving `y` undefined (NameError)
    y = multiply([upsample_psi, x])

    result = Conv3D(filters=shape_x[4], kernel_size=1, strides=1, padding='same')(y)
    result_bn = BatchNormalization()(result)
    return result_bn
def level_block(m, dim, depth, inc_rate, activation, dropout, batchnorm, pool_type, upconv, residual):
    """Recursively build one U-Net level: conv, downsample, recurse, upsample, merge.

    pool_type: 0 = max pooling, 1 = average pooling, otherwise strided Conv3D.
    upconv: True -> UpSampling3D (+ symmetry padding for odd dims), False -> Conv3DTranspose.
    """
    if depth > 0:
        n = conv_block(m, dim, activation, batchnorm, residual)
        if pool_type == 0:
            m = MaxPooling3D(pool_size=(2, 2, 2))(n)
        elif pool_type == 1:
            m = AveragePooling3D(pool_size=(2, 2, 2))(n)
        else:
            # BUG FIX: the strided conv's result was discarded, so no
            # downsampling happened on this branch; assign it to m.
            m = Conv3D(dim, 3, strides=2, padding='same')(n)
        m = level_block(m, int(inc_rate * dim), depth - 1, inc_rate, activation,
                        dropout, batchnorm, pool_type, upconv, residual)
        if upconv:
            m = UpSampling3D(size=(2, 2, 2))(m)
            # pad when pooling truncated odd dimensions so shapes match the skip
            diff_phi = n.shape[1] - m.shape[1]
            diff_r = n.shape[2] - m.shape[2]
            diff_z = n.shape[3] - m.shape[3]
            padding = [[int(diff_phi), 0], [int(diff_r), 0], [int(diff_z), 0]]
            if diff_phi != 0:
                m = SymmetryPadding3d(padding=padding, mode="SYMMETRIC")(m)
            elif (diff_r != 0 or diff_z != 0):
                m = SymmetryPadding3d(padding=padding, mode="CONSTANT")(m)
        else:
            m = Conv3DTranspose(dim, 3, strides=2, activation=activation, padding='same')(m)
        n = concatenate([n, m])
        m = conv_block(n, dim, activation, batchnorm, residual)
    else:
        # bottom of the recursion: a single conv block with dropout
        m = conv_block(m, dim, activation, batchnorm, residual, dropout)
    return m
def AutoEncoder3D(n_channelsX, n_channelsY, n_filters=32):
    """Convolutional 3D autoencoder: conv stack, pooled bottleneck, upsampled linear output."""
    if K.image_data_format() == 'channels_first':
        init_shape = (n_channelsX, None, None, None)
        channel_axis = 1
    else:
        init_shape = (None, None, None, n_channelsX)
        channel_axis = -1

    in1 = Input(shape=init_shape, name='in1')

    x = Conv3D(n_filters, (3, 3, 3), padding='same')(in1)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    x = conv_bn_relu(input=x, n_filters=n_filters)
    x = conv_bn_relu(input=x, n_filters=n_filters)

    # downsample to form the compressed representation
    x = MaxPooling3D((2, 2, 2), padding='valid')(x)
    x = conv_bn_relu(input=x, n_filters=n_filters)
    x = conv_bn_relu(input=x, n_filters=n_filters)
    x = conv_bn_relu(input=x, n_filters=n_filters)

    # restore the original spatial resolution
    x = UpSampling3D((2, 2, 2))(x)
    x = conv_bn_relu(input=x, n_filters=n_filters)
    x = Conv3D(n_channelsY, (3, 3, 3), activation=None, padding='same')(x)
    output = Activation('linear')(x)

    return Model(in1, output)
def deconv3d(layer_input, skip_input, filters, f_size=4, dropout_rate='self'):
    """Upsampling block: upsample+conv, optional dropout, AdaIN or BN, crop-and-concat skip.

    NOTE(review): reads `self.dropout`, `self.adain` and `w` from the enclosing
    scope — this function is expected to be defined inside a model builder.
    """
    u = UpSampling3D(size=2)(layer_input)
    u = Conv3D(filters, kernel_size=f_size, strides=1, padding='same')(u)

    # 'self' sentinel: derive the dropout rate from the surrounding model config
    if dropout_rate == 'self':
        dropout_rate = 0.5 if self.dropout else False
    if dropout_rate:
        u = Dropout(dropout_rate)(u)

    if self.adain:
        # adaptive instance normalization driven by the style vector w
        scale = Dense(filters, bias_initializer='ones')(w)
        bias = Dense(filters)(w)
        u = Lambda(adain)([u, scale, bias])
    else:
        u = BatchNormalization(momentum=0.8)(u)
    u = ReLU()(u)

    # crop the upsampled tensor so it aligns with the skip connection
    ch, cw, cd = get_crop_shape(u, skip_input)
    cropped = Cropping3D(cropping=(ch, cw, cd), data_format="channels_last")(u)
    return Concatenate()([cropped, skip_input])
def CFF(input_list, input_size, filters, i):
    """Cross-feature fusion: merge multi-scale features into the resolution of level i.

    Features finer than level i are average-pooled down, coarser ones are
    upsampled; each is self-gated by its own sigmoid before accumulation.
    The fused sum is then reverse-attention weighted by (1 - sigmoid(level_i))
    and added back to the level-i features.
    """
    out_shape = input_size / pow(2, i)
    target = input_list[i - 1]
    y = tf.zeros_like(target)

    for j, feat in enumerate(input_list):
        if j < i - 1:
            # finer scale: pool down to the target resolution, project, self-gate
            factor = int((input_size / pow(2, j + 1)) / out_shape)
            feat = AveragePooling3D((factor, factor, factor))(feat)
            feat = Conv3D(filters, (1, 1, 1), padding='same')(feat)
            gate = Activation('sigmoid')(feat)
            feat = Multiply()([feat, gate])
            y = Add()([y, feat])
        elif j > i - 1:
            # coarser scale: project, upsample to the target resolution, self-gate
            factor = int(out_shape / (input_size / pow(2, j + 1)))
            feat = Conv3D(filters, (1, 1, 1), padding='same')(feat)
            feat = UpSampling3D((factor, factor, factor))(feat)
            gate = Activation('sigmoid')(feat)
            feat = Multiply()([feat, gate])
            y = Add()([y, feat])

    # reverse attention: emphasize what level i itself suppresses
    inv = Activation('sigmoid')(target)
    inv = -1 * inv + 1
    out = Multiply()([inv, y])
    out = Add()([out, target])
    return out
def get_up_convolution(n_filters, pool_size, kernel_size=(2, 2, 2), strides=(2, 2, 2), deconvolution=False):
    """Return an upsampling layer: learned Deconvolution3D or parameter-free UpSampling3D."""
    if not deconvolution:
        return UpSampling3D(size=pool_size)
    return Deconvolution3D(filters=n_filters,
                           kernel_size=kernel_size,
                           strides=strides)
def get_small_3d_unet(input_shape):
    """Single-level 3D U-Net (channels_first) with a single-channel sigmoid output."""
    img_input = Input(input_shape)

    # encoder
    c1 = conv_block_simple_3D(img_input, 32, "conv1_1")
    c1 = conv_block_simple_3D(c1, 32, "conv1_2")
    p1 = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), padding="same",
                      name="pool1", data_format='channels_first')(c1)
    c2 = conv_block_simple_3D(p1, 64, "conv2_1")
    c2 = conv_block_simple_3D(c2, 64, "conv2_2")
    c2 = conv_block_simple_3D(c2, 64, "conv2_3")

    # decoder: upsample and fuse the encoder skip
    u3 = concatenate(
        [UpSampling3D(data_format='channels_first')(c2), c1], axis=1)
    c3 = conv_block_simple_3D(u3, 32, "conv3_1")
    c3 = conv_block_simple_3D(c3, 32, "conv3_2")
    c3 = SpatialDropout3D(rate=0.2, data_format='channels_first')(c3)

    prediction = Conv3D(1, (1, 1, 1), activation="sigmoid", name="prediction",
                        data_format='channels_first')(c3)
    return Model(img_input, prediction)
def SegNet3D(shape, weights=None):
    """SegNet-style 3D encoder/decoder compiled for binary segmentation.

    Args:
        shape: input tensor shape.
        weights: unused (kept for interface compatibility).
    """
    inputs = Input(shape)

    # encoder
    pool = inputs
    for n_filters in [4, 8, 16, 32]:
        conv = SegNet3DBlock(pool, layers=2, filters=n_filters)
        pool = MaxPooling3D((2, 2, 2))(conv)

    # bottleneck
    conv = SegNet3DBlock(pool, layers=3, filters=128)

    # decoder
    for n_filters in [64, 32, 16, 8]:
        upsam = UpSampling3D((2, 2, 2))(conv)
        conv = SegNet3DBlock(upsam, layers=2, filters=n_filters)

    # BUG FIX: the final block consumed `upsam`, discarding the decoder loop's
    # last conv block entirely; it should refine `conv`.
    conv = SegNet3DBlock(conv, layers=2, filters=4)

    outputs = Conv3D(1, 1, activation='sigmoid')(conv)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer=Adam(learning_rate=1e-4),
                  loss='binary_crossentropy',
                  metrics=[Precision(), Recall(), AUC(), Accuracy()])
    model.summary()
    return model
def __init__(self, conf, nc):
    """Configure Module6: 2x upsampling followed by conv -> BN -> ReLU.

    Parameters
    ----------
    conf : dict
        Configuration dictionary; must provide 'raw_data_path', 'hps', 'nn_arch'.
    nc : int
        Number of output channels for the convolution.
    """
    self.conf = conf
    self.raw_data_path = self.conf['raw_data_path']
    self.hps = self.conf['hps']
    self.nn_arch = self.conf['nn_arch']
    super(Module6, self).__init__()

    # Layer definitions.
    self.upsampling3d_1 = UpSampling3D()
    self.conv3d_1 = Conv3D(
        nc,
        kernel_size=3,
        strides=1,
        padding='same',
        use_bias=False,
        kernel_initializer=initializers.TruncatedNormal(),
        kernel_regularizer=regularizers.l2(self.hps['weight_decay']))
    self.bn_1 = BatchNormalization(momentum=self.hps['bn_momentum'],
                                   scale=self.hps['bn_scale'])
    self.act_1 = Activation('relu')
def up_stage(inputs, skip, filters, prior_fn, kernel_size=3, activation="relu", padding="SAME"):
    """Bayesian U-Net up stage: upsample, fuse the skip, refine with two Flipout convs."""
    def flipout_conv(tensor, ksize):
        # variational conv with the shared kernel prior
        return tfp.layers.Convolution3DFlipout(filters, ksize,
                                               activation=activation,
                                               padding=padding,
                                               kernel_prior_fn=prior_fn)(tensor)

    up = UpSampling3D()(inputs)
    up = flipout_conv(up, 2)
    up = GroupNormalization()(up)

    merged = concatenate([skip, up])
    merged = GroupNormalization()(merged)

    out = flipout_conv(merged, kernel_size)
    out = GroupNormalization()(out)
    out = flipout_conv(out, kernel_size)
    out = GroupNormalization()(out)
    return out
def up_stage(inputs, skip, filters, kernel_size=3, activation="relu", padding="SAME"):
    """U-Net up stage with Monte Carlo dropout (dropout stays active at inference)."""
    up = UpSampling3D()(inputs)
    up = Conv3D(filters, 2, activation=activation, padding=padding)(up)
    up = GroupNormalization()(up)

    fused = concatenate([skip, up])
    fused = GroupNormalization()(fused)

    out = fused
    for _ in range(2):
        out = Conv3D(filters, kernel_size, activation=activation, padding=padding)(out)
        out = GroupNormalization()(out)

    # training=True keeps dropout on at inference for MC uncertainty sampling
    out = SpatialDropout3D(0.5)(out, training=True)
    return out
def build_recon_model(init_shape, n_channelsY=1, n_filters=32, kernel_size=3):
    """Small reconstruction head: 2x upsample, conv+BN+ReLU, linear projection conv."""
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    input_recon = Input(shape=init_shape)

    x = UpSampling3D((2, 2, 2))(input_recon)
    x = Conv3D(filters=n_filters, kernel_size=(3, 3, 3), padding='same',
               kernel_initializer='glorot_uniform', use_bias=False,
               strides=1)(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    # final projection to the requested number of output channels (no activation)
    x = Conv3D(filters=n_channelsY,
               kernel_size=(kernel_size, kernel_size, kernel_size),
               padding='same', kernel_initializer='glorot_uniform',
               use_bias=False, strides=1)(x)

    return Model(inputs=input_recon, outputs=x)
def _upsampler(dimension, pool_x, pool_y, pool_z):
    """Return the upsampling layer matching `dimension` (4 -> 3D, 3 -> 2D, 2 -> 1D).

    Returns None implicitly for any other dimension value.
    """
    if dimension == 4:
        return UpSampling3D(size=(pool_x, pool_y, pool_z))
    if dimension == 3:
        return UpSampling2D(size=(pool_x, pool_y))
    if dimension == 2:
        return UpSampling1D(size=pool_x)
def DyFAModel_withDenseVnet(DenseVnet3D_Model_Path, Input_shape, num_classes_clf, num_classes_for_seg):
    """Build the DyFA classifier: frozen DenseVnet3D features fused with the CT input.

    Args:
        DenseVnet3D_Model_Path: path to pretrained segmentation weights.
        Input_shape: shape of the CT input volume.
        num_classes_clf: number of classification classes for Resnet3D.
        num_classes_for_seg: unused; kept for interface compatibility.
    """
    # --- load the segmentation module and freeze it ---
    ct_input = tf.keras.Input(shape=Input_shape, name='CT')
    seg_model = DenseVnet3D(ct_input,
                            nb_classes=SEG_NUMBER_OF_CLASSES,
                            encoder_nb_layers=NUM_DENSEBLOCK_EACH_RESOLUTION,
                            growth_rate=NUM_OF_FILTER_EACH_RESOLUTION,
                            dilation_list=DILATION_RATE,
                            dropout_rate=DROPOUT_RATE)
    seg_model.load_weights(DenseVnet3D_Model_Path)
    seg_model.summary()
    seg_model.trainable = False

    # low-resolution feature map from the segmentation trunk, upsampled 2x
    features = seg_model.get_layer('concatenate_25').output
    upsampled = UpSampling3D(size=(2, 2, 2))(features)

    # 1x1x1 conv squeezes the feature stack into a single-channel volume
    dyfa = tf.keras.layers.Conv3D(1, 1, name='DyFA')(upsampled)

    # stack [DyFA, CT] as a 2-channel input for the classification backbone
    clf_input = concatenate([dyfa, ct_input], axis=-1)
    clf_output = Resnet3D(clf_input, num_classes=num_classes_clf)

    return tf.keras.Model(inputs=ct_input, outputs=clf_output)