def CNN3D_lite(inp_shape, nb_classes):
    """Compact C3D-style 3-D CNN followed by an LSTM classifier.

    The C3D-lite feature vectors are L2-normalised before the LSTM
    (~3M parameters; runs on a Jetson Nano at ~8 FPS).
    Adapted from
    https://github.com/patrickjohncyh/ibm-waldo/blob/master/2-MachineLearning/server-training/Models.py
    """
    layers = [
        InputLayer(input_shape=inp_shape),
        # First stage downsamples spatially only (temporal axis preserved).
        Conv3D(32, 3, strides=(1, 2, 2), activation='relu', padding='same',
               name='conv1', input_shape=inp_shape),
        MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), padding='same',
                     name='pool1'),
        Conv3D(64, 3, activation='relu', padding='same', name='conv2'),
        MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid',
                     name='pool2'),
        Conv3D(128, 3, activation='relu', padding='same', name='conv3a'),
        Conv3D(128, 3, activation='relu', padding='same', name='conv3b'),
        MaxPooling3D(pool_size=(3, 2, 2), strides=(2, 2, 2), padding='valid',
                     name='pool3'),
        Conv3D(128, 3, activation='relu', padding='same', name='conv4a'),
        Conv3D(128, 3, activation='relu', padding='same', name='conv4b'),
        MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid',
                     name='pool4'),
        # Treat conv features as a 2-step sequence of 384-dim vectors.
        Reshape((2, 384)),
        Lambda(lambda t: K.l2_normalize(t, axis=-1)),
        LSTM(512, return_sequences=False, input_shape=(2, 384), dropout=0.5),
        Dense(512, activation='relu'),
        Dropout(0.5),
        Dense(nb_classes, activation='softmax'),
    ]
    model = tf.keras.Sequential()
    for layer in layers:
        model.add(layer)
    return model
def downconv_model_3d(input_shape, use_batch_norm=True, dropout=0.5,
                      dropout_change_per_layer=0.0, filters=16, num_layers=4,
                      pooling=None, **kwargs):
    """3-D encoder: stacked conv blocks with 2x max-pooling in between.

    Returns the Keras model plus ``[down_layers, filters]`` — the per-level
    pre-pooling activations (useful as skip connections) and the final
    filter count.
    """
    inputs = Input(input_shape)
    net = inputs
    down_layers = []
    for _ in range(num_layers):
        net = conv3d_block(inputs=net, filters=filters,
                           use_batch_norm=use_batch_norm, dropout=dropout)
        down_layers.append(net)
        net = MaxPooling3D((2, 2, 2))(net)
        # Dropout may ramp with depth; filter count doubles every level.
        dropout += dropout_change_per_layer
        filters *= 2
    net = conv3d_block(inputs=net, filters=filters,
                       use_batch_norm=use_batch_norm, dropout=dropout)
    # Optional extra pooling on the bottleneck output.
    if pooling == "max":
        net = MaxPooling3D((2, 2, 2))(net)
    elif pooling == "avg":
        net = AveragePooling3D((2, 2, 2))(net)
    model = Model(inputs=[inputs], outputs=[net])
    return model, [down_layers, filters]
def createModel():
    """Build and compile a VGG-like 3-D CNN for 80x80x80 single-channel
    volumes with 3 output classes.

    Returns:
        The compiled Sequential model (categorical cross-entropy, Adam).
    """
    # create a sequential model with a layout similar to the vgg16 model
    model = Sequential()
    model.add(Conv3D(12, (3, 3, 3), input_shape=(80, 80, 80, 1), activation='relu'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2)))
    model.add(Conv3D(24, (3, 3, 3), activation='relu'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2)))
    model.add(Conv3D(48, (3, 3, 3), activation='relu'))
    model.add(Dropout(0.50))
    model.add(Conv3D(48, (3, 3, 3), activation='relu'))
    model.add(Dropout(0.50))
    model.add(Conv3D(48, (3, 3, 3), activation='relu'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2)))
    print(model.output_shape)
    model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors
    print(model.output_shape)
    model.add(Dropout(0.50))
    model.add(Dense(96, activation='relu'))
    model.add(Dropout(0.50))
    model.add(Dense(3, activation='softmax'))
    model.summary()
    # `lr` is deprecated in tf.keras optimizers; use `learning_rate`
    # (consistent with the other builders in this file).
    model.compile(Adam(learning_rate=.0001), loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
def __init__(self, input, batch_size, image_height, image_width, class_number):
    """Build an LPRNet-style recognition graph on top of ``input``.

    Bug fix: ``Dropout`` is a layer, so it must be instantiated and then
    called on a tensor — ``Dropout(rate=0.5)(x)``.  The original code passed
    the tensor to the constructor (``Dropout(x, rate=0.5)``), which fails at
    graph-construction time.

    NOTE(review): Conv2D outputs are fed into MaxPooling3D/AveragePooling3D,
    which implies a 5-D input tensor — confirm the expected input rank.
    """
    x = Conv2D(filters=64, kernel_size=3, strides=1, activation="relu")(input)
    keep_1 = BatchNormalization()(x)
    x = MaxPooling3D(pool_size=(3, 3, 1), strides=(1, 1, 1))(keep_1)
    keep_2 = small_basic_block(x, filters=128)
    x = MaxPooling3D(pool_size=(3, 3, 1), strides=(1, 2, 2))(keep_2)
    x = small_basic_block(x, filters=256)
    keep_3 = small_basic_block(x, filters=256)
    x = MaxPooling3D(pool_size=(3, 3, 1), strides=(1, 2, 4))(keep_3)
    x = Dropout(rate=0.5)(x)  # was Dropout(x, rate=0.5) — invalid layer call
    x = Conv2D(filters=256, kernel_size=(1, 4), strides=1)(x)
    x = BatchNormalization()(x)
    x = Dropout(rate=0.5)(x)  # was Dropout(x, rate=0.5) — invalid layer call
    x = Conv2D(filters=class_number, kernel_size=(13, 1), strides=1,
               activation="relu")(x)
    keep_4 = BatchNormalization()(x)
    # Global context embedding: pool intermediate feature maps so they can
    # later be combined at a common spatial resolution.
    keep_1 = AveragePooling3D(pool_size=(5, 5, 1), strides=(5, 5, 1))(keep_1)
    keep_2 = AveragePooling3D(pool_size=(5, 5, 1), strides=(5, 5, 1))(keep_2)
    keep_3 = AveragePooling3D(pool_size=(4, 10, 1), strides=(4, 2, 1))(keep_3)
def create_cnn_sparse_drop(lr, shape, drprate):
    """3-D CNN binary classifier with a dropout-regularised dense head.

    Args:
        lr: Adam learning rate.
        shape: input volume shape (channels last).
        drprate: dropout rate used between the wide dense layers.
    """
    cnn = Sequential()
    cnn.add(Conv3D(32, (3, 3, 3), data_format='channels_last',
                   input_shape=shape, activation='relu'))
    cnn.add(Conv3D(32, (3, 3, 3), activation='relu'))
    cnn.add(MaxPooling3D((3, 3, 3)))
    cnn.add(Conv3D(64, (3, 3, 3), activation='relu'))
    cnn.add(Conv3D(64, (3, 3, 3), activation='relu'))
    cnn.add(MaxPooling3D((3, 3, 3)))
    cnn.add(Conv3D(128, (3, 3, 3), activation='relu'))
    cnn.add(MaxPooling3D((2, 2, 2)))
    cnn.add(Flatten())
    # Dense head narrows 512 -> 256 -> 128 with dropout after each stage.
    for units in (512, 256, 128):
        cnn.add(Dense(units, activation='relu'))
        cnn.add(Dropout(drprate))
    cnn.add(Dense(64, activation='relu'))
    cnn.add(Dense(1, activation='sigmoid'))
    cnn.compile(optimizer=Adam(learning_rate=lr, clipnorm=1),
                metrics=['accuracy'], loss='binary_crossentropy')
    return cnn
def CNN_3D(input_shape=(50, 50, 50, 1)):
    """3-D CNN binary classifier with LeakyReLU activations and a sigmoid
    output unit."""
    def conv(tensor, n_filters):
        # Conv3D followed by LeakyReLU with slope 0.1.
        tensor = Conv3D(filters=n_filters, kernel_size=(3, 3, 3))(tensor)
        return LeakyReLU(alpha=.1)(tensor)

    def dense(tensor, units):
        # Dense -> LeakyReLU -> Dropout(0.25).
        tensor = Dense(units)(tensor)
        tensor = LeakyReLU(alpha=.1)(tensor)
        return Dropout(0.25)(tensor)

    inputs = Input(shape=input_shape)
    path = conv(inputs, 32)
    path = conv(path, 64)
    path = MaxPooling3D(pool_size=(3, 3, 3))(path)
    path = conv(path, 128)
    path = conv(path, 256)
    path = MaxPooling3D(pool_size=(3, 3, 3))(path)
    path = Flatten()(path)
    path = dense(path, 1024)
    path = dense(path, 512)
    path = dense(path, 256)
    path = Dense(1)(path)
    path = Activation('sigmoid')(path)
    return Model(inputs=[inputs], outputs=[path])
def create(self):
    """Build a 3-D conv regression network with a single linear output."""
    main_input = Input(shape=(self.seq_len, self.img_x, self.img_y, self.ch_n),
                       name='main_input')
    # Two conv / pool / batch-norm stages.
    net = Conv3D(32, kernel_size=(3, 3, 3), activation='relu',
                 padding='same')(main_input)
    net = MaxPooling3D(pool_size=(3, 3, 3), padding='same')(net)
    net = BatchNormalization()(net)
    net = Conv3D(64, kernel_size=(3, 3, 3), activation='relu',
                 padding='same')(net)
    net = MaxPooling3D(pool_size=(3, 3, 3), padding='same')(net)
    net = BatchNormalization()(net)
    # Dense head with dropout; 'conv_out' exposes the 512-d embedding.
    net = Flatten()(net)
    net = Dense(512, activation='relu', name='conv_out')(net)
    net = Dense(128, activation='relu')(net)
    net = Dropout(0.6)(net)
    net = Dense(32, activation='relu')(net)
    net = Dropout(0.6)(net)
    reg_out = Dense(1, activation='linear', name='reg_out')(net)
    reg_model = Model(inputs=[main_input], outputs=[reg_out])
    reg_model.summary()
    return reg_model
def model_4(input_shape):
    """Deep 3-D CNN for MRI volumes: four conv stages, global average
    pooling, and a 3-way softmax head."""
    inputs = Input(input_shape)
    t = Conv3D(filters=32, kernel_size=3, activation='relu')(inputs)
    t = Conv3D(filters=32, kernel_size=3, activation='relu')(t)
    t = MaxPooling3D(pool_size=2)(t)
    t = Conv3D(filters=64, kernel_size=3, activation='relu')(t)
    t = Conv3D(filters=64, kernel_size=3, activation='relu')(t)
    t = MaxPooling3D(pool_size=2)(t)
    t = BatchNormalization(momentum=0.9)(t)
    t = Conv3D(filters=128, kernel_size=3, activation='relu')(t)
    t = Conv3D(filters=128, kernel_size=3, activation='relu')(t)
    t = MaxPooling3D(pool_size=2)(t)
    t = BatchNormalization(momentum=0.9)(t)
    t = Conv3D(filters=256, kernel_size=3, activation='relu')(t)
    t = Conv3D(filters=256, kernel_size=3, activation='relu')(t)
    t = Conv3D(filters=256, kernel_size=3, activation='relu')(t)
    # Global pooling instead of Flatten keeps the dense head small.
    t = GlobalAveragePooling3D()(t)
    t = Dropout(rate=0.2)(t)
    t = Dense(units=128, activation='relu')(t)
    t = Dropout(rate=0.2)(t)
    t = Dense(units=128, activation='relu')(t)
    outputs = Dense(units=3, activation="softmax")(t)
    return tf.keras.Model(inputs, outputs, name="model_4_mri")
def build(self):
    """Assemble the U-Net-like CSFN network and store it on ``self.model``.

    Pooling and transpose-conv strides act only in-plane (z axis kept).
    """
    wmn_input = Input(shape=self.input_shape)
    # Encoder: two CBR stages per level.
    enc1 = CBR(wmn_input, 16, (3, 3, 3))
    enc1 = CBR(enc1, 32, (3, 3, 3))
    enc2 = MaxPooling3D(pool_size=(2, 2, 1))(enc1)
    enc2 = CBR(enc2, 32, (3, 3, 3))
    enc2 = CBR(enc2, 64, (3, 3, 3))
    # Bottleneck with dropout, then upsample back toward enc2's resolution.
    bottom = MaxPooling3D(pool_size=(2, 2, 1))(enc2)
    bottom = CBR(bottom, 64, (3, 3, 3))
    bottom = CBR(bottom, 128, (3, 3, 3))
    bottom = Dropout(rate=self.dropout_rate)(bottom)
    bottom = Conv3DTranspose(filters=64, kernel_size=(3, 3, 3),
                             strides=(2, 2, 1), padding='same')(bottom)
    # Decoder with skip connections back to the encoder stages.
    dec1 = Concatenate()([bottom, enc2])
    dec1 = CBR(dec1, 64, (3, 3, 3))
    dec1 = CBR(dec1, 64, (3, 3, 3))
    dec1 = Conv3DTranspose(filters=32, kernel_size=(3, 3, 3),
                           strides=(2, 2, 1), padding='same')(dec1)
    dec2 = Concatenate()([dec1, enc1])
    dec2 = CBR(dec2, 32, (3, 3, 3))
    dec2 = CBR(dec2, 32, (3, 3, 3))
    if self.single_slice_out:
        # Collapse the z axis to a single slice before the 1x1x1 head.
        dec2 = Conv3D(filters=32, kernel_size=(1, 1, 5))(dec2)
        csfn_output = Conv3D(filters=1, kernel_size=(1, 1, 1))(dec2)
        csfn_output = Reshape((256, 256, 1))(csfn_output)
    else:
        csfn_output = Conv3D(filters=1, kernel_size=(1, 1, 1))(dec2)
    self.csfn_output_name = csfn_output.name.split('/')[0]
    self.model = keras.Model(inputs=[wmn_input], outputs=[csfn_output],
                             name='network')
def VGG16():
    """VGG16-style 3-D CNN for 32x32x32 single-channel volumes (3 classes).

    Every conv layer carries L2 weight decay via the module-level
    ``weight_decay`` constant.
    """
    model = models.Sequential()
    model.add(Conv3D(32, (3,3,3), activation='relu', padding='same', input_shape=(32, 32, 32,1), kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(MaxPooling3D(pool_size=[2,2,2],strides=None))
    model.add(Conv3D(64, (3,3,3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(MaxPooling3D(pool_size=[2,2,2],strides=None))
    model.add(Conv3D(128, (3,3,3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Conv3D(128, (3,3,3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(MaxPooling3D(pool_size=[2,2,2],strides=None))
    model.add(Conv3D(256, (3,3,3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Conv3D(256, (3,3,3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(MaxPooling3D(pool_size=[2,2,2],strides=None))
    model.add(Conv3D(256, (3,3,3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Conv3D(256, (3,3,3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(MaxPooling3D(pool_size=[2,2,2],strides=None))
    # NOTE(review): Flatten appears to be commented out here, so the Dense
    # layers below act along the last axis of a 5-D tensor — confirm that is
    # intended (Flatten is the conventional step at this point).
    # model.add(Flatten())  # 2*2*512
    model.add(Dense(4068, activation='relu'))
    # model.add(Dropout(0.5))
    model.add(Dense(1024, activation='relu'))
    # model.add(Dropout(0.5))
    # NOTE(review): a width-10 'relu' layer directly before the 3-way softmax
    # is unusually narrow — confirm it was not meant to be removed.
    model.add(Dense(10, activation='relu'))
    # model.add(Dropout(0.5))
    model.add(Dense(3, activation='softmax'))
    return model
def create_autoencoder(img_px_size=32, slice_count=8):
    """Build a 3-D convolutional autoencoder.

    This model assumes (img_px_size, img_px_size, slice_count, 1) input
    (defaults: (32, 32, 8)).

    Returns:
        (autoencoder, encoder): the full reconstruction model and the
        sub-model that outputs the compressed representation.

    Bug fix: the first decoder Conv3D output was discarded — the following
    UpSampling3D was applied to ``encoded`` instead of ``x``.
    """
    tf.keras.backend.set_image_data_format("channels_last")
    IMG_PX_SIZE = img_px_size
    SLICE_COUNT = slice_count
    input_shape = (IMG_PX_SIZE, IMG_PX_SIZE, SLICE_COUNT, 1)
    input_img = Input(shape=input_shape)
    # encoder portion
    x = Conv3D(16, (3, 3, 3), activation="relu", padding="same")(input_img)
    x = MaxPooling3D((2, 2, 2), padding="same")(x)
    x = Conv3D(8, (3, 3, 3), activation="relu", padding="same")(x)
    x = MaxPooling3D((2, 2, 2), padding="same")(x)
    x = Conv3D(8, (3, 3, 3), activation="relu", padding="same")(x)
    encoded = MaxPooling3D((2, 2, 2), padding="same")(x)
    # at this point the representation is compressed to 4*4*8 = 128 dims
    # decoder portion
    x = Conv3D(8, (3, 3, 3), activation="relu", padding="same")(encoded)
    x = UpSampling3D((2, 2, 2))(x)  # was (encoded): skipped the conv above
    x = Conv3D(8, (3, 3, 3), activation="relu", padding="same")(x)
    x = UpSampling3D((2, 2, 2))(x)
    x = Conv3D(16, (3, 3, 3), activation="relu", padding="same")(x)
    x = UpSampling3D((2, 2, 2))(x)
    decoded = Conv3D(1, (3, 3, 3), activation="sigmoid", padding="same")(x)
    autoencoder = Model(input_img, decoded)
    # autoencoder.compile(optimizer = 'adam', loss='binary_crossentropy')
    encoder = Model(input_img, encoded)
    return autoencoder, encoder
def get_video_model(input_shape, nb_classes):
    """3-D CNN video classifier: two conv stages plus a small dense head."""
    # Define model
    model = Sequential()
    model.add(Conv3D(32, kernel_size=(3, 3, 3), input_shape=(input_shape), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv3D(32, kernel_size=(3, 3, 3), padding='same'))
    # NOTE(review): 'softmax' on an intermediate conv layer (here and below)
    # normalises across channels and is very unusual for a hidden activation —
    # confirm 'relu' was not intended.
    model.add(Activation('softmax'))
    model.add(MaxPooling3D(pool_size=(3, 3, 3), padding='same'))
    model.add(Dropout(0.5))
    model.add(Conv3D(32, kernel_size=(3, 3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv3D(32, kernel_size=(3, 3, 3), padding='same'))
    model.add(Activation('softmax'))
    model.add(MaxPooling3D(pool_size=(3, 3, 3), padding='same'))
    model.add(Dropout(0.5))
    # Classifier head: linear Dense -> BN -> sigmoid -> dropout -> softmax.
    model.add(Flatten())
    model.add(Dense(64, activation=None))
    model.add(BatchNormalization())
    model.add(Activation('sigmoid'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes, activation='softmax'))
    return model
def model_3(input_shape):
    """3-D CNN for PET volumes: three conv stages, flatten, 3-way softmax."""
    inputs = tf.keras.layers.Input(input_shape)
    net = Conv3D(filters=16, kernel_size=3, activation='relu')(inputs)
    net = Conv3D(filters=16, kernel_size=3, activation='relu')(net)
    net = MaxPooling3D(pool_size=2)(net)
    net = Conv3D(filters=64, kernel_size=3, activation='relu')(net)
    net = Conv3D(filters=64, kernel_size=3, activation='relu')(net)
    net = MaxPooling3D(pool_size=2)(net)
    net = BatchNormalization()(net)
    net = Conv3D(filters=128, kernel_size=3, activation='relu')(net)
    net = Conv3D(filters=128, kernel_size=3, activation='relu')(net)
    net = MaxPooling3D(pool_size=2)(net)
    # Classification head.
    net = Flatten()(net)
    net = Dropout(rate=0.1)(net)
    net = Dense(units=256, activation='relu')(net)
    outputs = Dense(units=3, activation="softmax")(net)
    return tf.keras.Model(inputs, outputs, name="model_3_pet")
def conv_3d(self):
    """Build a 3D convolutional network, based loosely on C3D.

    https://arxiv.org/pdf/1412.0767.pdf
    """
    def spatial_pool():
        # Pooling never reduces the temporal axis; only height/width shrink.
        return MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2))

    model = Sequential([
        Conv3D(32, (3, 3, 3), activation='relu',
               input_shape=self.input_shape),
        spatial_pool(),
        Conv3D(64, (3, 3, 3), activation='relu'),
        spatial_pool(),
        Conv3D(128, (3, 3, 3), activation='relu'),
        Conv3D(128, (3, 3, 3), activation='relu'),
        spatial_pool(),
        Conv3D(256, (2, 2, 2), activation='relu'),
        Conv3D(256, (2, 2, 2), activation='relu'),
        spatial_pool(),
        Flatten(),
        Dense(1024),
        Dropout(0.5),
        Dense(1024),
        Dropout(0.5),
        Dense(self.nb_classes, activation='softmax'),
    ])
    return model
def build(self):
    """Assemble the discriminator (four conv levels, then a dense binary
    head) and store it on ``self.model``."""
    wmn_input = Input(shape=self.input_shape)
    # Each level: a (3,3,2) then a (3,3,1) valid-padded CBR stage.
    level1 = CBR(wmn_input, 16, (3, 3, 2), padding='valid')
    level1 = CBR(level1, 32, (3, 3, 1), padding='valid')
    level2 = MaxPooling3D(pool_size=(2, 2, 1))(level1)
    level2 = CBR(level2, 32, (3, 3, 2), padding='valid')
    level2 = CBR(level2, 64, (3, 3, 1), padding='valid')
    level3 = MaxPooling3D(pool_size=(2, 2, 1))(level2)
    level3 = CBR(level3, 64, (3, 3, 2), padding='valid')
    level3 = CBR(level3, 128, (3, 3, 1), padding='valid')
    level4 = MaxPooling3D(pool_size=(2, 2, 1))(level3)
    level4 = CBR(level4, 128, (3, 3, 2), padding='valid')
    level4 = CBR(level4, 64, (3, 3, 1), padding='valid')
    # Real/fake decision head.
    fc = Flatten()(level4)
    fc = Dropout(0.25)(fc)
    fc = Dense(256, activation='relu')(fc)
    discrim_output = Dense(1, activation='sigmoid')(fc)
    self.output_name = discrim_output.name.split('/')[0]
    self.model = keras.Model(inputs=[wmn_input], outputs=[discrim_output],
                             name='network')
def build_CNN(input_shape, n_conv_layers=3, n_filters=16, n_dense_layers=0,
              n_nodes=50, learning_rate=0.01):
    """Build and compile a configurable 3-D CNN binary classifier.

    Args:
        input_shape: shape of the input volume (including channel axis).
        n_conv_layers: total number of conv/BN/pool stages.
        n_filters: filters in the first conv layer; doubled each stage.
        n_dense_layers: number of intermediate dense layers before output.
        n_nodes: units per intermediate dense layer.
        learning_rate: Adam learning rate.

    Returns:
        The compiled Sequential model (sigmoid output, loss ``BC``).
    """
    # Setup a sequential model
    model = Sequential()
    # Add first convolutional layer to the model, requires input shape
    model.add(Conv3D(n_filters, kernel_size=(3, 3, 3), activation='relu',
                     padding='same', input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(MaxPooling3D(pool_size=(2, 2, 2)))
    # Add remaining convolutional layers to the model, the number of filters
    # should increase a factor 2 for each layer
    temp = n_filters * 2
    for i in range(n_conv_layers - 1):
        model.add(Conv3D(temp, kernel_size=(3, 3, 3), activation='relu',
                         padding='same'))
        model.add(BatchNormalization())
        model.add(MaxPooling3D(pool_size=(2, 2, 2)))
        temp = temp * 2
    model.add(Flatten())
    # Add intermediate dense layers
    for i in range(n_dense_layers):
        model.add(Dense(n_nodes, activation='relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.4))
    model.add(Dense(1, activation='sigmoid'))
    # Compile model.  `lr` is deprecated in tf.keras optimizers; use
    # `learning_rate` (consistent with the other builders in this file).
    model.compile(loss=BC, optimizer=Adam(learning_rate=learning_rate),
                  metrics=['accuracy'])
    return model
def C3DNet(freeze_conv_layers=False, weights=None, dense_activation='softmax',
           dropout=0.5, include_top=False, input_data=None):
    """
    C3D model implementation.
    Source: https://github.com/adamcasson/c3d
    Reference: Du Tran, Lubomir Bourdev, Rob Fergus, Lorenzo Torresani, and
    Manohar Paluri. Learning spatiotemporal features with 3D convolutional
    networks. ICCV, 2015.

    Args:
        freeze_conv_layers: Whether to freeze convolutional layers at the
            time of training
        weights: Pre-trained weights
        dense_activation: Activation of the last layer
        dropout: Dropout of dense layers
        include_top: Whether to add fc layers
        input_data: Optional Keras Input tensor; defaults to a fresh
            Input(shape=(16, 112, 112, 3)).  (Previously a mutable default
            evaluated at def time, which shared one Input layer across all
            calls.)

    Returns:
        C3D model

    Bug fix: the freeze loop iterated ``model.layers`` where ``model`` is a
    layer-output tensor, not a Model; it now iterates ``net_model.layers``.
    """
    if input_data is None:
        input_data = Input(shape=(16, 112, 112, 3))
    model = Conv3D(64, 3, activation='relu', padding='same', name='conv1')(input_data)
    model = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), padding='valid', name='pool1')(model)
    # 2nd layer group
    model = Conv3D(128, 3, activation='relu', padding='same', name='conv2')(model)
    model = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool2')(model)
    # 3rd layer group
    model = Conv3D(256, 3, activation='relu', padding='same', name='conv3a')(model)
    model = Conv3D(256, 3, activation='relu', padding='same', name='conv3b')(model)
    model = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool3')(model)
    # 4th layer group
    model = Conv3D(512, 3, activation='relu', padding='same', name='conv4a')(model)
    model = Conv3D(512, 3, activation='relu', padding='same', name='conv4b')(model)
    model = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool4')(model)
    # 5th layer group
    model = Conv3D(512, 3, activation='relu', padding='same', name='conv5a')(model)
    model = Conv3D(512, 3, activation='relu', padding='same', name='conv5b')(model)
    model = ZeroPadding3D(padding=(0, 1, 1), name='zeropad5')(model)  # ((0, 0), (0, 1), (0, 1))
    model = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool5')(model)
    model_flatten = Flatten(name='flatten')(model)
    # FC layers group
    model = Dense(4096, activation='relu', name='fc6')(model_flatten)
    model = Dropout(dropout)(model)
    model = Dense(4096, activation='relu', name='fc7')(model)
    model_fc7 = Dropout(dropout)(model)
    model_fc8 = Dense(487, activation=dense_activation, name='fc8')(model_fc7)
    net_model = Model(input_data, model_fc8)
    if weights is not None:
        net_model.load_weights(weights)
    if include_top:
        # Replace the 487-way head with a single unit branching off fc7.
        model_fc8_new = Dense(1, activation=dense_activation, name='fc8')(model_fc7)
        net_model = Model(input_data, model_fc8_new)
        if freeze_conv_layers:
            # Freeze everything except the last five layers of the model.
            for layer in net_model.layers[:-5]:
                layer.trainable = False
            for layer in net_model.layers:
                print(layer.name, layer.trainable)
    else:
        # Feature-extractor mode: output the flattened pool5 features.
        net_model = Model(input_data, model_flatten)
    return net_model
def create_experimental_autoencoder(img_px_size=64, slice_count=8):
    """this model assumes (64, 64, 8) is the dimensionality of the 3D input

    Returns:
        (autoencoder, encoder) — the full model and the sub-model ending at
        the 500-dim dense bottleneck.
    """
    tf.keras.backend.set_image_data_format("channels_last")
    IMG_PX_SIZE = img_px_size
    SLICE_COUNT = slice_count
    input_shape = (IMG_PX_SIZE, IMG_PX_SIZE, SLICE_COUNT, 1)
    input_img = Input(shape=input_shape)
    initializer = tf.keras.initializers.GlorotNormal()
    # encoder portion
    x = Conv3D(
        20, (5, 5, 5), activation="relu", padding="same", kernel_initializer=initializer
    )(input_img)
    x = MaxPooling3D((2, 2, 2), padding="same")(x)
    x = Conv3D(
        20, (3, 3, 3), activation="relu", padding="same", kernel_initializer=initializer
    )(x)
    x = MaxPooling3D((2, 2, 2), padding="same")(x)
    x = Conv3D(
        20, (3, 3, 3), activation="relu", padding="same", kernel_initializer=initializer
    )(x)
    x = MaxPooling3D((2, 2, 2), padding="same")(x)
    x = Flatten()(x)
    encoded = Dense(500, activation="relu", kernel_initializer=initializer)(x)
    # at this point the representation is compressed to 500 dims
    # decoder portion
    x = Dense(3200, activation="relu", kernel_initializer=initializer)(encoded)
    # 8*8*1*50 = 3200: restore a coarse 3-D grid with 50 channels before the
    # upsampling convs expand it back to (64, 64, 8).
    x = Reshape((8, 8, 1, 50))(x)
    x = Conv3D(
        20, (3, 3, 3), activation="relu", padding="same", kernel_initializer=initializer
    )(x)
    x = UpSampling3D((2, 2, 2))(x)
    x = Conv3D(
        20, (3, 3, 3), activation="relu", padding="same", kernel_initializer=initializer
    )(x)
    x = UpSampling3D((2, 2, 2))(x)
    x = Conv3D(
        20, (5, 5, 5), activation="relu", padding="same", kernel_initializer=initializer
    )(x)
    x = UpSampling3D((2, 2, 2))(x)
    decoded = Conv3D(
        1,
        (3, 3, 3),
        activation="sigmoid",
        padding="same",
        kernel_initializer=initializer,
    )(x)
    autoencoder = Model(input_img, decoded)
    encoder = Model(input_img, encoded)
    return autoencoder, encoder
def build_res_atten_unet_3d(input_shape, filter_num=8, merge_axis=-1,
                            pool_size=(2, 2, 2), up_size=(2, 2, 2)):
    """Residual-attention 3-D U-Net with a single-channel sigmoid output.

    Relies on external ``residual_block`` and ``attention_block`` helpers.
    """
    data = Input(shape=input_shape)
    # Stem: conv + BN + ReLU, then an initial pool.
    conv1 = Conv3D(filter_num * 4, 3, padding='same')(data)
    conv1 = BatchNormalization()(conv1)
    conv1 = Activation('relu')(conv1)
    pool = MaxPooling3D(pool_size=pool_size)(conv1)
    # Encoder: residual blocks with doubling channel counts.
    res1 = residual_block(pool, output_channels=filter_num * 4)
    pool1 = MaxPooling3D(pool_size=pool_size)(res1)
    res2 = residual_block(pool1, output_channels=filter_num * 8)
    pool2 = MaxPooling3D(pool_size=pool_size)(res2)
    res3 = residual_block(pool2, output_channels=filter_num * 16)
    pool3 = MaxPooling3D(pool_size=pool_size)(res3)
    res4 = residual_block(pool3, output_channels=filter_num * 32)
    pool4 = MaxPooling3D(pool_size=pool_size)(res4)
    res5 = residual_block(pool4, output_channels=filter_num * 64)
    res5 = residual_block(res5, output_channels=filter_num * 64)
    # Decoder: attention-gated skips; at each level upsample, concatenate,
    # then apply a residual block.
    atb5 = attention_block(res4, encoder_depth=1, name='atten1')
    up1 = UpSampling3D(size=up_size)(res5)
    merged1 = concatenate([up1, atb5], axis=merge_axis)
    res5 = residual_block(merged1, output_channels=filter_num * 32)
    atb6 = attention_block(res3, encoder_depth=2, name='atten2')
    up2 = UpSampling3D(size=up_size)(res5)
    merged2 = concatenate([up2, atb6], axis=merge_axis)
    res6 = residual_block(merged2, output_channels=filter_num * 16)
    atb7 = attention_block(res2, encoder_depth=3, name='atten3')
    up3 = UpSampling3D(size=up_size)(res6)
    merged3 = concatenate([up3, atb7], axis=merge_axis)
    res7 = residual_block(merged3, output_channels=filter_num * 8)
    atb8 = attention_block(res1, encoder_depth=4, name='atten4')
    up4 = UpSampling3D(size=up_size)(res7)
    merged4 = concatenate([up4, atb8], axis=merge_axis)
    res8 = residual_block(merged4, output_channels=filter_num * 4)
    # Final upsample back to input resolution; merge with the stem features.
    up = UpSampling3D(size=up_size)(res8)
    merged = concatenate([up, conv1], axis=merge_axis)
    conv9 = Conv3D(filter_num * 4, 3, padding='same')(merged)
    conv9 = BatchNormalization()(conv9)
    conv9 = Activation('relu')(conv9)
    output = Conv3D(1, 3, padding='same',
                    activation='sigmoid')(conv9)
    model = Model(data, output)
    return model
def C3D(weights='sports1M'):
    """Instantiates a C3D Keras model

    Keyword arguments:
    weights -- weights to load into model. (default is sports1M)

    Returns:
    A Keras model.

    Raises:
    ValueError -- if ``weights`` is neither 'sports1M' nor None.
    """
    if weights not in {'sports1M', None}:
        raise ValueError('weights should be either be sports1M or None')
    # Input layout depends on the backend's image data format.
    if K.image_data_format() == 'channels_last':
        shape = (16,112,112,3)
    else:
        shape = (3,16,112,112)
    model = Sequential()
    model.add(Conv3D(64, 3, activation='relu', padding='same', name='conv1', input_shape=shape))
    # pool1 keeps the temporal axis; the later pools halve all three axes.
    model.add(MaxPooling3D(pool_size=(1,2,2), strides=(1,2,2), padding='same', name='pool1'))
    model.add(Conv3D(128, 3, activation='relu', padding='same', name='conv2'))
    model.add(MaxPooling3D(pool_size=(2,2,2), strides=(2,2,2), padding='valid', name='pool2'))
    model.add(Conv3D(256, 3, activation='relu', padding='same', name='conv3a'))
    model.add(Conv3D(256, 3, activation='relu', padding='same', name='conv3b'))
    model.add(MaxPooling3D(pool_size=(2,2,2), strides=(2,2,2), padding='valid', name='pool3'))
    model.add(Conv3D(512, 3, activation='relu', padding='same', name='conv4a'))
    model.add(Conv3D(512, 3, activation='relu', padding='same', name='conv4b'))
    model.add(MaxPooling3D(pool_size=(2,2,2), strides=(2,2,2), padding='valid', name='pool4'))
    model.add(Conv3D(512, 3, activation='relu', padding='same', name='conv5a'))
    model.add(Conv3D(512, 3, activation='relu', padding='same', name='conv5b'))
    model.add(ZeroPadding3D(padding=(0,1,1)))
    model.add(MaxPooling3D(pool_size=(2,2,2), strides=(2,2,2), padding='valid', name='pool5'))
    model.add(Flatten())
    # Fully connected classifier head (487 Sports-1M classes).
    model.add(Dense(4096, activation='relu', name='fc6'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu', name='fc7'))
    model.add(Dropout(0.5))
    model.add(Dense(487, activation='softmax', name='fc8'))
    if weights == 'sports1M':
        # Download (cached) pretrained Sports-1M weights and load them.
        weights_path = get_file('sports1M_weights_tf.h5', WEIGHTS_PATH,
                                cache_subdir='models',
                                md5_hash='b7a93b2f9156ccbebe3ca24b41fc5402')
        model.load_weights(weights_path)
    return model
def randflow_model(
    img_shape,
    model,
    model_name='randflow_model',
    flow_sigma=None,
    flow_amp=None,
    blur_sigma=5,
    interp_mode='linear',
    indexing='xy',
):
    """Wrap ``model`` with a random-flow spatial augmentation front end.

    A random dense flow field is sampled (Gaussian via RandFlow when
    ``flow_amp`` is None, uniform via RandFlow_Uniform when ``flow_sigma``
    is None), upsampled to image resolution for 3-D inputs, and used to
    warp the input with a SpatialTransformer before feeding ``model``.
    When ``model`` is None, the warped image and the flow are returned as
    the model's outputs instead.
    """
    n_dims = len(img_shape) - 1
    x_in = Input(img_shape, name='img_input_randwarp')
    if n_dims == 3:
        # Sample the flow at 1/4 resolution (two 2x poolings) to save
        # memory, scaling the blur accordingly; upsampled again below.
        flow = MaxPooling3D(2)(x_in)
        flow = MaxPooling3D(2)(flow)
        blur_sigma = int(np.ceil(blur_sigma / 4.))
        flow_shape = tuple([int(s / 4) for s in img_shape[:-1]] + [n_dims])
    else:
        flow = x_in
        flow_shape = img_shape[:-1] + (n_dims, )
    # random flow field
    if flow_amp is None:
        flow = RandFlow(name='randflow', img_shape=flow_shape,
                        blur_sigma=blur_sigma, flow_sigma=flow_sigma)(flow)
    elif flow_sigma is None:
        flow = RandFlow_Uniform(name='randflow', img_shape=flow_shape,
                                blur_sigma=blur_sigma, flow_amp=flow_amp)(flow)
    # NOTE(review): if both flow_sigma and flow_amp are given, neither branch
    # above runs and the pooled image itself is used as the flow — confirm
    # callers never pass both.
    if n_dims == 3:
        flow = Reshape(flow_shape)(flow)
        # upsample with linear interpolation (two 2x steps back to full res)
        flow = Lambda(interp_upsampling)(flow)
        flow = Lambda(interp_upsampling,
                      output_shape=img_shape[:-1] + (n_dims, ))(flow)
        flow = Reshape(img_shape[:-1] + (n_dims, ), name='randflow_out')(flow)
    else:
        flow = Reshape(img_shape[:-1] + (n_dims, ), name='randflow_out')(flow)
    x_warped = SpatialTransformer(interp_method=interp_mode,
                                  name='densespatialtransformer_img',
                                  indexing=indexing)([x_in, flow])
    if model is not None:
        model_outputs = model(x_warped)
        if not isinstance(model_outputs, list):
            model_outputs = [model_outputs]
    else:
        model_outputs = [x_warped, flow]
    return Model(inputs=[x_in], outputs=model_outputs, name=model_name)
def myDenseNetv2Dropout(input_shape, dropout_rate=0.3):
    """DenseNet-style 3-D classifier with SpatialDropout3D regularisation.

    Builds a shared base of dense/transition blocks, then an EGFR-specific
    head ending in a single sigmoid unit ('EGFR_global_pred').  Also writes
    a 'myDenseNetv2.png' architecture diagram via plot_model as a side
    effect.  Shape comments assume a 48x240x360x1 input.
    """
    bn_axis = -1 if K.image_data_format() == 'channels_last' else 1
    input_tensor = Input(shape=input_shape)  # 48, 240, 360, 1
    x = Conv3D(16, (3, 3, 3), strides=(1, 2, 2), use_bias=False,
               padding='same', name='block0_conv1')(input_tensor)  # [48, 120, 180]
    x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name='block0_bn1')(x)
    x = Activation('relu', name='block0_relu1')(x)
    x = Conv3D(16, (3, 3, 3), strides=(1, 1, 1), use_bias=False,
               padding='same', name='block0_conv2')(x)  # [48, 120, 180]
    x = _denseBlock(x, [16, 16], 'block_11')  # [48, 120, 180]
    x = _transit_block(x, 16, 'block13')  # [48, 120, 180]
    x = SpatialDropout3D(dropout_rate)(x)
    x = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)  # [24, 60, 90]
    x = _denseBlock(x, [24, 24, 24], 'block_21')  # [24, 60, 90]
    x = _transit_block(x, 24, 'block23')  # [24, 60, 90]
    x = SpatialDropout3D(dropout_rate)(x)
    x = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)  # [12, 30, 45]
    x = _denseBlock(x, [32, 32, 32, 32], 'block_31')  # [12, 30, 45]
    x = SpatialDropout3D(dropout_rate)(x)
    x = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)  # [6, 15, 23]
    x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name='block_final_bn')(x)
    x = Activation('relu', name='block_final_relu')(x)
    # ############## above are the shared base ####################
    # EGFR-specific head.
    x = _denseBlock(x, [32, 32], 'EGFR_block_11')
    x = MaxPooling3D((1, 2, 2), strides=(1, 2, 2), padding='same')(x)  # [6, 8, 12, 64]
    x = SpatialDropout3D(dropout_rate)(x)
    x = _transit_block(x, 64, 'EGFR_block_12')  # [6, 8, 12, 64]
    x = GlobalAveragePooling3D()(x)
    x = Dense(1, activation='sigmoid', name='EGFR_global_pred')(x)
    # create model
    model = Model(input_tensor, x, name='myDense')
    plot_model(model, 'myDenseNetv2.png', show_shapes=True)
    return model
def train():
    """Load the cached dataset, train a ConvLSTM classifier, and save it.

    Relies on module-level ``directory``, ``filename_x``, ``filename_y``
    and ``filename_model``.
    """
    # Load Dataset (inputs scaled from 8-bit pixel values into [0, 1])
    x = np.load(
        directory.joinpath('datasets').joinpath(filename_x + '.npy')) / 255
    y = np.load(directory.joinpath('datasets').joinpath(filename_y + '.npy'))
    # Add Number Of Color Channels
    x = np.expand_dims(x, axis=-1)
    input_shape = x.shape[1:]
    output_shape = y.shape[-1]
    # Create Model
    model = Sequential()
    model.add(
        ConvLSTM2D(filters=40,
                   kernel_size=(3, 3),
                   input_shape=input_shape,
                   padding='same',
                   return_sequences=True))
    model.add(BatchNormalization())
    model.add(MaxPooling3D(pool_size=(2, 2, 2), padding='same'))
    model.add(
        ConvLSTM2D(filters=40,
                   kernel_size=(3, 3),
                   padding='same',
                   return_sequences=True))
    model.add(BatchNormalization())
    model.add(MaxPooling3D(pool_size=(2, 2, 2), padding='same'))
    # Final ConvLSTM collapses the sequence (return_sequences=False).
    model.add(
        ConvLSTM2D(filters=40,
                   kernel_size=(3, 3),
                   padding='same',
                   return_sequences=False))
    model.add(BatchNormalization())
    model.add(Flatten())
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.5))
    # Sigmoid output with binary cross-entropy: per-unit binary targets.
    model.add(Dense(output_shape, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['binary_accuracy'])
    # Train Model
    model.fit(x, y, epochs=200, batch_size=10, validation_split=0.05)
    # Save Model
    model.save(str(directory.joinpath('models').joinpath(filename_model)))
def train(self): print("[C3D][train] start") # callbacks: early stop es_callback = keras.callbacks.EarlyStopping(monitor='loss', patience=2) cp_callback = keras.callbacks.ModelCheckpoint(filepath=CP_PATH, save_weights_only=True, verbose=1) model = keras.Sequential([ Conv3D(64, self.conv_kernel_shape, activation='relu', input_shape=self.input_data_shape), MaxPooling3D(pool_size=self.pool_kernel_shape1, strides=None, padding="valid", data_format=None), Conv3D(128, self.conv_kernel_shape, activation='relu'), MaxPooling3D(pool_size=self.pool_kernel_shape1, strides=None, padding="valid", data_format=None), Flatten(), Dense(64, activation="relu", name="fc_layer1"), Dense(64, activation="relu", name="fc_layer2"), Dense(self.output_size, activation="softmax", name="fc_layer3") ]) model.save(MODEL_PATH) model.compile('adam', loss='categorical_crossentropy') print(model.summary()) # get data (trainX, testX, trainY, testY) = self.getData() # model.save_weights(checkpoint_path.format(epoch=0)) print("\n[C3D][train] training network...") H = model.fit(trainX, trainY, validation_data=(testX, testY), batch_size=self.batch_size, epochs=self.epoch_num, class_weight=self.class_weight, verbose=1, callbacks=[es_callback, cp_callback]) predictions = model.predict(testX, batch_size=self.batch_size) print("\n[C3D][train] evaluating network...") print( classification_report(testY.argmax(axis=1), predictions.argmax(axis=1), target_names=self.targets)) print("[C3D][train] end")
def get_simple_3d_unet(input_shape):
    """Simple 3-D U-Net (channels-first): three encoder levels, a
    bottleneck, and three decoder levels with skip concatenations, ending
    in a single-channel sigmoid prediction."""
    img_input = Input(input_shape)
    # --- encoder ---
    conv1 = conv_block_simple_3D(img_input, 32, "conv1_1")
    conv1 = conv_block_simple_3D(conv1, 32, "conv1_2")
    pool1 = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), padding="same",
                         name="pool1", data_format='channels_first')(conv1)
    conv2 = conv_block_simple_3D(pool1, 64, "conv2_1")
    conv2 = conv_block_simple_3D(conv2, 64, "conv2_2")
    pool2 = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), padding="same",
                         name="pool2", data_format='channels_first')(conv2)
    conv3 = conv_block_simple_3D(pool2, 128, "conv3_1")
    conv3 = conv_block_simple_3D(conv3, 128, "conv3_2")
    pool3 = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), padding="same",
                         name="pool3", data_format='channels_first')(conv3)
    # --- bottleneck ---
    conv4 = conv_block_simple_3D(pool3, 256, "conv4_1")
    conv4 = conv_block_simple_3D(conv4, 256, "conv4_2")
    conv4 = conv_block_simple_3D(conv4, 256, "conv4_3")
    # --- decoder (skips concatenate on the channel axis, axis=1) ---
    up5 = concatenate(
        [UpSampling3D(data_format='channels_first')(conv4), conv3], axis=1)
    conv5 = conv_block_simple_3D(up5, 128, "conv5_1")
    conv5 = conv_block_simple_3D(conv5, 128, "conv5_2")
    up6 = concatenate(
        [UpSampling3D(data_format='channels_first')(conv5), conv2], axis=1)
    conv6 = conv_block_simple_3D(up6, 64, "conv6_1")
    conv6 = conv_block_simple_3D(conv6, 64, "conv6_2")
    up7 = concatenate(
        [UpSampling3D(data_format='channels_first')(conv6), conv1], axis=1)
    conv7 = conv_block_simple_3D(up7, 32, "conv7_1")
    conv7 = conv_block_simple_3D(conv7, 32, "conv7_2")
    conv7 = SpatialDropout3D(rate=0.2, data_format='channels_first')(conv7)
    prediction = Conv3D(1, (1, 1, 1), activation="sigmoid", name="prediction",
                        data_format='channels_first')(conv7)
    model = Model(img_input, prediction)
    return model
def create_model_sequential():
    """
    Creates a C3D-style model with the sequential API:
    https://keras.io/models/sequential/

    Returns:
        The (uncompiled) Sequential model. Input shape is taken from the
        module-level ``args`` (volume_size, dim, dim, n_channels); the
        487-way softmax head matches the Sports-1M class count.
    """
    model = Sequential()
    # input_shape = (16, 112, 112, 3,)  # other size does not work from the box
    input_shape = (args.volume_size, args.dim, args.dim, args.n_channels)
    model.add(Conv3D(64, (3, 3, 3), activation='relu', padding='same',
                     name='conv1', input_shape=input_shape))
    # pool1 keeps the temporal axis intact (1, 2, 2)
    model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2),
                           padding='valid', name='pool1'))
    # 2nd layer group
    model.add(Conv3D(128, (3, 3, 3), activation='relu', padding='same',
                     name='conv2'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                           padding='valid', name='pool2'))
    # 3rd layer group
    model.add(Conv3D(256, (3, 3, 3), activation='relu', padding='same',
                     name='conv3a'))
    model.add(Conv3D(256, (3, 3, 3), activation='relu', padding='same',
                     name='conv3b'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                           padding='valid', name='pool3'))
    # 4th layer group
    model.add(Conv3D(512, (3, 3, 3), activation='relu', padding='same',
                     name='conv4a'))
    model.add(Conv3D(512, (3, 3, 3), activation='relu', padding='same',
                     name='conv4b'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                           padding='valid', name='pool4'))
    # 5th layer group
    model.add(Conv3D(512, (3, 3, 3), activation='relu', padding='same',
                     name='conv5a'))
    model.add(Conv3D(512, (3, 3, 3), activation='relu', padding='same',
                     name='conv5b'))
    # pad depth/width so pool5 divides evenly
    model.add(ZeroPadding3D(padding=((0, 0), (0, 1), (0, 1)),
                            name='zeropad5'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                           padding='valid', name='pool5'))
    model.add(Flatten())
    # FC layers group
    model.add(Dense(4096, activation='relu', name='fc6'))
    model.add(Dropout(.5))
    model.add(Dense(4096, activation='relu', name='fc7'))
    model.add(Dropout(.5))
    model.add(Dense(487, activation='softmax', name='fc8'))
    # BUG FIX: the return statement was commented out, so this function
    # always returned None and callers received no model.
    return model
def MultiResUnet3D(height, width, z, n_channels):
    '''
    MultiResUNet3D

    Arguments:
        height {int} -- height of image
        width {int} -- width of image
        z {int} -- length along z axis
        n_channels {int} -- number of channels in image

    Returns:
        [keras model] -- MultiResUNet3D model
    '''
    inputs = Input((height, width, z, n_channels))

    # Contracting path: 4 MultiRes blocks with doubling widths; each
    # skip connection is refined by a ResPath whose length shrinks with
    # depth (4, 3, 2, 1).
    x = inputs
    skips = []
    for depth in range(4):
        width_d = 32 * (2 ** depth)
        block = MultiResBlock(width_d, x)
        x = MaxPooling3D(pool_size=(2, 2, 2))(block)
        skips.append(ResPath(width_d, 4 - depth, block))

    # Bottleneck at 32*16 filters.
    x = MultiResBlock(32 * 16, x)

    # Expanding path mirrors the contraction: transpose-conv upsample,
    # concatenate the matching ResPath output on the channel axis.
    for depth in reversed(range(4)):
        width_d = 32 * (2 ** depth)
        up = concatenate(
            [Conv3DTranspose(width_d, (2, 2, 2), strides=(2, 2, 2),
                             padding='same')(x), skips[depth]], axis=4)
        x = MultiResBlock(width_d, up)

    # 1x1x1 sigmoid head for binary segmentation.
    out = conv3d_bn(x, 1, 1, 1, 1, activation='sigmoid')

    return Model(inputs=[inputs], outputs=[out])
def model_4(input_shape):
    """Three-stage 3D CNN classifier with a 3-way softmax head.

    Each conv uses a 3x3x3 kernel, ReLU, and weak L2 weight decay.
    """
    inputs = Input(input_shape)

    def _conv(tensor, n_filters):
        # Shared conv recipe; a fresh regularizer per layer, as before.
        return Conv3D(filters=n_filters, kernel_size=3, activation='relu',
                      kernel_regularizer=l2(0.00005))(tensor)

    # Stage 1: two 16-filter convs, then pool.
    x = _conv(inputs, 16)
    x = _conv(x, 16)
    x = MaxPooling3D(pool_size=2)(x)

    # Stage 2: three 64-filter convs, pool, then batch norm.
    for _ in range(3):
        x = _conv(x, 64)
    x = MaxPooling3D(pool_size=2)(x)
    x = BatchNormalization(momentum=0.9)(x)

    # Stage 3: three 128-filter convs, then pool.
    for _ in range(3):
        x = _conv(x, 128)
    x = MaxPooling3D(pool_size=2, strides=2)(x)

    # Classifier head.
    x = Flatten()(x)
    x = Dropout(rate=0.2)(x)
    x = Dense(units=256, activation='relu')(x)
    x = Dense(units=128, activation='relu')(x)
    outputs = Dense(units=3, activation="softmax")(x)

    return tf.keras.Model(inputs, outputs, name="model_4_pet")
def create_model_3D(self, input_shape, n_labels=2):
    """Assemble a 3D U-Net whose depth follows ``self.feature_map``.

    The first level pools/upsamples only the in-plane axes (1, 2, 2),
    leaving the slice axis untouched; all deeper levels use (2, 2, 2).
    Returns a Model mapping ``input_shape`` to ``n_labels`` channels
    through ``self.activation``.
    """
    inputs = Input(input_shape)
    x = inputs
    # Normalized conv outputs cached for the copy & concatenate links.
    skips = []

    # Contracting path.
    for i, neurons in enumerate(self.feature_map):
        x = conv_layer_3D(x, neurons, self.ba_norm, strides=1)
        x = conv_layer_3D(x, neurons, self.ba_norm, strides=1)
        skips.append(x)
        pool = (1, 2, 2) if i == 0 else (2, 2, 2)
        x = MaxPooling3D(pool_size=pool)(x)

    # Middle layer at the deepest feature width.
    neurons = self.feature_map[-1]
    x = conv_layer_3D(x, neurons, self.ba_norm, strides=1)
    x = conv_layer_3D(x, neurons, self.ba_norm, strides=1)

    # Expanding path mirrors the contraction.
    for i in reversed(range(len(self.feature_map))):
        neurons = self.feature_map[i]
        scale = (1, 2, 2) if i == 0 else (2, 2, 2)
        x = Conv3DTranspose(neurons, scale, strides=scale,
                            padding='same')(x)
        x = concatenate([x, skips[i]], axis=-1)
        x = conv_layer_3D(x, neurons, self.ba_norm, strides=1)
        x = conv_layer_3D(x, neurons, self.ba_norm, strides=1)

    # Output layer.
    conv_out = Conv3D(n_labels, (1, 1, 1),
                      activation=self.activation)(x)
    return Model(inputs=[inputs], outputs=[conv_out])
def __init__(self):
    """Create the layer stack for a binary 3D-CNN classifier, creating
    each conv stage under a different ``tf.device`` GPU scope.

    NOTE(review): layers are only *constructed* under ``tf.device`` here;
    whether this actually shards execution across GPUs depends on where
    variables are created and where ``call`` runs — confirm the placement
    is effective in this TF version.
    """
    super(CNN_Model, self).__init__()
    with tf.device("/gpu:0"):
        # Stem: large 7x7x7 receptive field with stride-2 downsampling.
        self.conv1 = Conv3D(64,
                            data_format='channels_last',
                            kernel_size=(7, 7, 7),
                            strides=(2, 2, 2),
                            padding='valid',
                            activation='relu')
    with tf.device("/gpu:1"):
        self.conv2 = Conv3D(64, kernel_size=(3, 3, 3), padding='valid',
                            activation='relu')
    with tf.device("/gpu:2"):
        self.conv3 = Conv3D(128, kernel_size=(3, 3, 3), padding='valid',
                            activation='relu')
        self.maxPool1 = MaxPooling3D(pool_size=(2, 2, 2), padding='valid')
    with tf.device("/gpu:3"):
        self.conv4 = Conv3D(128, kernel_size=(3, 3, 3), padding='valid',
                            activation='relu')
        self.maxPool2 = MaxPooling3D(pool_size=(2, 2, 2), padding='valid')
    with tf.device("/gpu:4"):
        self.conv5 = Conv3D(128, kernel_size=(3, 3, 3), padding='valid',
                            activation='relu')
        self.maxPool3 = MaxPooling3D(pool_size=(2, 2, 2), padding='valid')
    # Classifier head: two 256-unit ReLU layers with heavy (0.7) dropout,
    # softmax over 2 classes.
    self.flatten = Flatten()
    self.dense1 = Dense(256, activation='relu')
    self.dropout1 = Dropout(0.7)
    self.dense2 = Dense(256, activation='relu')
    self.dropout2 = Dropout(0.7)
    self.dense3 = Dense(2, activation='softmax')