def make_model():
    """Build, compile and fit a small 3D CNN binary classifier.

    Relies on module-level tensors ``trn_x``/``trn_y`` and
    ``val_x``/``val_y`` being defined before the call.

    Returns:
        Sequential: the fitted model.
    """
    model = models.Sequential()
    # Kernels of shape (1, 3, 3) convolve spatially and leave the depth
    # axis untouched; pooling likewise only halves H and W.
    # FIX: rewritten from the removed Keras-1 positional
    # Convolution3D(filters, d, h, w) API to the Keras-2 Conv3D tuple form.
    model.add(layers.Conv3D(16, (1, 3, 3), input_shape=(1, 24, 128, 128),
                            activation='relu'))
    model.add(layers.Conv3D(32, (1, 3, 3), activation='relu'))
    model.add(layers.MaxPooling3D(pool_size=(1, 2, 2)))
    model.add(layers.Conv3D(32, (1, 3, 3), activation='relu'))
    model.add(layers.Conv3D(64, (1, 3, 3), activation='relu'))
    model.add(layers.MaxPooling3D(pool_size=(1, 2, 2)))
    model.add(layers.Conv3D(64, (1, 3, 3), activation='relu'))
    model.add(layers.Conv3D(128, (1, 3, 3), activation='relu'))
    model.add(layers.MaxPooling3D(pool_size=(1, 2, 2)))
    model.add(layers.Conv3D(128, (1, 3, 3), activation='relu'))
    model.add(layers.MaxPooling3D(pool_size=(1, 2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(2, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adadelta',
                  metrics=['accuracy'])
    # FIX: `epochs` replaces the long-removed `nb_epoch` keyword.
    model.fit(trn_x, trn_y, batch_size=24, epochs=10, verbose=1,
              validation_data=(val_x, val_y))
    return model
def create_voxnet_model_small(input_shape, output_size):
    """Build the small VoxNet variant (C7-F32-P2-C5-F64-P2-D512).

    See: http://dimatura.net/publications/3dcnn_lz_maturana_scherer_icra15.pdf

    Args:
        input_shape (shape): Input-shape.
        output_size (int): Output-size.

    Returns:
        Model: A model.
    """
    # Trainable params: 301,378
    net = models.Sequential(name="C7-F32-P2-C5-F64-P2-D512")
    # Append a trailing channel axis so Conv3D sees (D, H, W, 1).
    net.add(layers.Reshape(target_shape=input_shape + (1,),
                           input_shape=input_shape))
    for filters, kernel, pool in ((32, (7, 7, 7), (4, 4, 4)),
                                  (64, (5, 5, 5), (2, 2, 2))):
        net.add(layers.Conv3D(filters, kernel, activation="relu"))
        net.add(layers.MaxPooling3D(pool))
    net.add(layers.Flatten())
    net.add(layers.Dense(512, activation="relu"))
    net.add(layers.Dense(output_size))
    return net
def __init__(self, shape):
    """Two-stage regularized 3D conv net ending in a 4-way softmax head."""
    self.re_rate = 0.9  # l2 strength applied to every conv kernel
    self.model = models.Sequential()
    first = True
    # Each stage: two Conv3D+ReLU pairs, then pool and dropout.
    for filters in (16, 32):
        for _ in range(2):
            conv_kwargs = {'kernel_regularizer': regularizers.l2(self.re_rate)}
            if first:
                conv_kwargs['input_shape'] = shape
                first = False
            self.model.add(layers.Conv3D(filters, (3, 3, 3), **conv_kwargs))
            self.model.add(layers.ReLU())
        self.model.add(layers.MaxPooling3D((2, 2, 2)))
        self.model.add(layers.Dropout(rate=0.25))
    self.model.add(layers.Flatten())
    self.model.add(layers.Dense(16))
    self.model.add(layers.Dense(4, activation='softmax'))
def net27():
    """3D CNN for 27x27 patches with 103 spectral bands and 9-way softmax.

    Input volumes are (103, 27, 27, 1): band axis first, one channel.

    Returns:
        Sequential: the compiled model (also writes 'model.png').
    """
    model = Sequential()
    # Convolutional feature extractor: the (32, h, w) kernels mix 32
    # spectral bands at a time while scanning spatially; pooling only
    # shrinks the spatial dims.
    model.add(L.Conv3D(32, (32, 4, 4), activation='relu',
                       input_shape=(103, 27, 27, 1)))
    model.add(L.MaxPooling3D(pool_size=(1, 2, 2)))
    model.add(L.Conv3D(64, (32, 5, 5), activation='relu'))
    model.add(L.MaxPooling3D(pool_size=(1, 2, 2)))
    model.add(L.Dropout(.5))
    model.add(L.Conv3D(128, (32, 4, 4), activation='relu'))
    model.add(L.Dropout(.5))
    # Fully connected classifier head.
    model.add(L.Flatten())
    model.add(L.Dense(128, activation='relu'))
    model.add(L.Dropout(.5))
    model.add(L.Dense(9, activation='softmax'))
    model.compile(optimizer='sgd', loss='categorical_crossentropy',
                  metrics=['accuracy'])
    plot_model(model, show_shapes=True, to_file='model.png')
    return model
def _pooling_function(self, inputs, pool_size, strides, padding, data_format):
    """Max-pool a complex-valued tensor.

    Pools the real and imaginary components independently with identical
    MaxPooling3D settings, then recombines them into a complex tensor.
    """
    real_part, imag_part = complex_to_real_imag(inputs)

    def _pool(component):
        # A fresh pooling layer per component, matching the original.
        return KL.MaxPooling3D(pool_size, strides, padding)(component)

    return real_imag_to_complex(_pool(real_part), _pool(imag_part))
def D3GenerateModel_old(n_filter=64, number_of_class=1, input_shape=(16, 144, 144, 1),
                        activation_last='sigmoid', metrics=None, loss='mse',
                        optimizer='adam', dropout=0.5, init='glorot_uniform'):
    """Legacy 3D CNN regressor (hinge loss, single linear output unit).

    Note: `n_filter`, `number_of_class`, `activation_last`, `loss`,
    `optimizer`, `dropout` and `init` are accepted for interface
    compatibility but are not used — the architecture below is hard-wired.

    Args:
        metrics: list of Keras metrics; defaults to ['mse', 'acc', auc].

    Returns:
        Sequential: the compiled model.
    """
    if metrics is None:
        # FIX: was a mutable default argument evaluated at import time.
        metrics = ['mse', 'acc', auc]
    filter_size = 16
    model = Sequential()
    # Block 1: two same-padded convs, pool only over H and W.
    model.add(layers.Conv3D(filters=filter_size, input_shape=input_shape,
                            kernel_size=(2, 2, 2), strides=(1, 1, 1),
                            padding='same', activation='relu'))
    model.add(layers.Conv3D(filters=filter_size, input_shape=input_shape,
                            kernel_size=(2, 2, 2), strides=(1, 1, 1),
                            padding='same', activation='relu'))
    model.add(layers.MaxPooling3D((1, 2, 2), strides=(1, 2, 2), padding='valid'))
    # Blocks 2-5: second conv widens to filter_size*i, pool all three dims.
    for i in range(1, 5):
        model.add(layers.Conv3D(filters=filter_size, kernel_size=(2, 2, 2),
                                strides=(1, 1, 1), padding='same',
                                activation='relu'))
        model.add(layers.Conv3D(filters=filter_size * i, kernel_size=(2, 2, 2),
                                strides=(1, 1, 1), padding='same',
                                activation='relu'))
        model.add(layers.MaxPooling3D((2, 2, 2), strides=(2, 2, 2),
                                      padding='valid'))
    model.add(layers.Flatten())
    model.add(layers.Dense(2048, activation='relu'))
    model.add(layers.Dropout(.5))
    model.add(layers.Dense(2048, activation='relu'))
    model.add(layers.Dropout(.5))
    model.add(layers.Dense(1, activation='linear', kernel_regularizer=l2(0.01),
                           bias_regularizer=l2(0.01)))
    model.summary()
    # FIX: keras.optimizers.sgd (lowercase alias) is gone in modern Keras;
    # use the SGD class directly.
    model.compile(optimizer=keras.optimizers.SGD(lr=1e-4, nesterov=True),
                  loss='hinge', metrics=metrics)
    return model
def getModel():
    """3D CNN classifier for (256, 256, 47, 1) volumes with 4-way softmax.

    Returns:
        Model: compiled with categorical crossentropy and RMSprop(1e-4).
    """
    img_input = layers.Input(shape=(256, 256, 47, 1))
    x = layers.Dropout(0.1)(img_input)  # light input dropout
    # Conv tower: pooling shrinks H/W faster than the 47-slice axis.
    x = layers.Conv3D(8, (7, 7, 5), activation='relu')(x)
    x = layers.MaxPooling3D(pool_size=(2, 2, 1))(x)
    x = layers.Conv3D(16, (5, 5, 3), activation='relu')(x)
    x = layers.MaxPooling3D(pool_size=(2, 2, 1))(x)
    x = layers.Conv3D(16, (5, 5, 3), activation='relu')(x)
    x = layers.MaxPooling3D(pool_size=(2, 2, 2))(x)
    x = layers.Conv3D(32, (5, 5, 3), activation='relu')(x)
    x = layers.Conv3D(32, (3), activation='relu')(x)
    x = layers.MaxPooling3D(pool_size=(2, 2, 1))(x)
    x = layers.Conv3D(64, (3, 3, 1), activation='relu')(x)
    x = layers.Conv3D(64, (3, 3, 1), activation='relu')(x)
    x = layers.Dropout(0.5)(x)
    # Dense head.
    x = layers.Flatten()(x)
    x = layers.Dense(512, activation='relu')(x)
    x = layers.Dropout(0.5)(x)
    x = layers.Dense(4, activation='softmax')(x)
    # BUG FIX: Model takes `inputs`/`outputs`; the legacy singular
    # `input=`/`output=` keywords fail on Keras 2+.
    model = Model(inputs=img_input, outputs=x)
    model.compile(loss=categorical_crossentropy,
                  optimizer=optimizers.RMSprop(lr=1e-4),
                  metrics=['accuracy'])
    print(model.summary())
    return model
def D3GenerateModel(n_filter=16, number_of_class=2, input_shape=(16, 144, 144, 1),
                    activation_last='sigmoid', metrics=None, loss='mse',
                    optimizer='adam', dropout=0.5, init='glorot_uniform'):
    """SELU 3D CNN classifier with global max pooling and 2-way softmax.

    Note: `number_of_class`, `activation_last`, `loss`, `optimizer`,
    `dropout` and `init` are accepted for interface compatibility; the
    architecture and training config below are hard-wired.

    Args:
        n_filter: base filter count, doubled at each widening step.
        metrics: list of Keras metrics; defaults to ['mse', 'acc'].

    Returns:
        Sequential: the compiled model.
    """
    if metrics is None:
        # FIX: was a mutable default argument.
        metrics = ['mse', 'acc']
    filter_size = n_filter
    model = Sequential()
    model.add(layers.Conv3D(filters=filter_size, input_shape=input_shape,
                            kernel_size=(3, 3, 3), strides=(1, 1, 1),
                            padding='valid', activation='selu'))
    model.add(layers.Conv3D(filters=filter_size * 2, kernel_size=(3, 3, 3),
                            strides=(1, 2, 2), padding='valid', activation='selu'))
    model.add(layers.MaxPooling3D((1, 2, 2), padding='valid'))
    model.add(layers.Conv3D(filters=filter_size * 2, kernel_size=(3, 3, 3),
                            strides=(1, 1, 1), padding='valid', activation='selu'))
    model.add(layers.Conv3D(filters=filter_size * 4, kernel_size=(3, 3, 3),
                            strides=(1, 2, 2), padding='valid', activation='selu'))
    model.add(layers.MaxPooling3D((1, 2, 2), padding='valid'))
    model.add(layers.Conv3D(filters=filter_size * 4, kernel_size=(3, 3, 3),
                            strides=(1, 1, 1), padding='valid', activation='selu'))
    model.add(layers.Conv3D(filters=filter_size * 8, kernel_size=(3, 3, 3),
                            strides=(1, 2, 2), padding='valid', activation='selu'))
    model.add(layers.MaxPooling3D((1, 2, 2), padding='same'))
    model.add(layers.Conv3D(filters=filter_size * 16, kernel_size=(3, 3, 3),
                            strides=(1, 1, 1), padding='same', activation='selu'))
    model.add(layers.Conv3D(filters=filter_size * 32, kernel_size=(3, 3, 3),
                            strides=(2, 2, 2), padding='same', activation='selu'))
    # Collapse spatial dims entirely before the dense classifier.
    model.add(layers.GlobalMaxPooling3D())
    model.add(layers.Dense(512, activation='selu'))
    model.add(keras.layers.Dropout(0.5))
    model.add(layers.Dense(256, activation='selu'))
    model.add(layers.Dense(2, activation='softmax'))
    model.summary()
    # FIX: keras.optimizers.adam (lowercase alias) is gone in modern Keras;
    # use the Adam class directly.
    model.compile(optimizer=keras.optimizers.Adam(lr=2e-6),
                  loss='categorical_crossentropy', metrics=metrics)
    return model
def irn2_stem(input_tensor):
    """Inception-ResNet-v2 style stem adapted to 3D volumes."""
    # Initial conv stack.
    t = layers.Conv3D(32, (3, 3, 3), padding='valid', strides=2,
                      activation='relu')(input_tensor)
    t = layers.Conv3D(32, (3, 3, 3), padding='valid', activation='relu')(t)
    t = layers.Conv3D(64, (3, 3, 3), padding='same', activation='relu')(t)
    # First split: max-pool vs strided conv, merged on the channel axis.
    pooled = layers.MaxPooling3D((3, 3, 3), strides=2)(t)
    strided = layers.Conv3D(96, (3, 3, 3), padding='valid', strides=2,
                            activation='relu')(t)
    merged = layers.concatenate([pooled, strided], axis=-1)
    # Second split: plain 3x3x3 branch vs factorized 7-tap branch.
    branch_a = layers.Conv3D(64, (1, 1, 1), padding='same',
                             activation='relu')(merged)
    branch_a = layers.Conv3D(96, (3, 3, 3), padding='valid',
                             activation='relu')(branch_a)
    branch_b = layers.Conv3D(64, (1, 1, 1), padding='same',
                             activation='relu')(merged)
    branch_b = layers.Conv3D(64, (7, 1, 1), padding='same')(branch_b)
    branch_b = layers.Conv3D(64, (1, 7, 1), padding='same')(branch_b)
    branch_b = layers.Conv3D(64, (1, 1, 7), padding='same',
                             activation='relu')(branch_b)
    branch_b = layers.Conv3D(96, (3, 3, 3), padding='valid',
                             activation='relu')(branch_b)
    merged_2 = layers.concatenate([branch_a, branch_b], axis=-1)
    # Final split: strided conv vs max-pool, merged on the channel axis.
    conv_down = layers.Conv3D(192, (3, 3, 3), padding='valid', strides=2,
                              activation='relu')(merged_2)
    pool_down = layers.MaxPooling3D((3, 3, 3), padding='valid',
                                    strides=2)(merged_2)
    return layers.concatenate([conv_down, pool_down], axis=-1)
def create_3DCNN_model(input_shape):
    """Build a 3D convnet with a 10-way softmax head.

    Args:
        input_shape: channels-last input volume shape.

    Returns:
        Sequential: the compiled model.
    """
    model = Sequential()
    # Feature extractor: same-padded convs (first ReLU, rest SELU).
    model.add(layers.Conv3D(32, (3, 3, 3), input_shape=input_shape,
                            activation="relu", padding="same"))
    model.add(layers.Conv3D(64, (3, 3, 3), activation="selu", padding="same"))
    model.add(layers.MaxPooling3D(pool_size=(3, 3, 3)))
    for _ in range(2):
        model.add(layers.Conv3D(64, (3, 3, 3), activation="selu",
                                padding="same"))
    model.add(layers.MaxPooling3D(pool_size=(2, 2, 2)))
    model.add(layers.Conv3D(128, (3, 3, 3), activation="selu", padding="same"))
    model.add(layers.MaxPooling3D(pool_size=(2, 2, 2), padding="same"))
    # Classifier head.
    model.add(layers.Flatten())
    model.add(layers.Dense(64, activation="selu",
                           kernel_regularizer=regularizers.l2(0.001)))
    model.add(layers.Dropout(0.2))
    model.add(layers.Dense(32, activation="selu"))
    model.add(layers.Dense(10, activation="softmax"))
    model.compile(optimizer=tf.train.AdamOptimizer(),
                  loss="categorical_crossentropy", metrics=["accuracy"])
    return model
def CNN_Classification():
    """3D CNN binary classifier for 32x32x32 single-channel volumes."""
    n_channels = 1
    net = models.Sequential()
    # Two conv/pool stages.
    net.add(layers.Conv3D(32, (3, 3, 3), activation='relu',
                          input_shape=(32, 32, 32, n_channels)))
    net.add(layers.MaxPooling3D((2, 2, 2)))
    net.add(layers.Conv3D(64, (3, 3, 3), activation='relu'))
    net.add(layers.MaxPooling3D((2, 2, 2)))
    net.add(layers.Dropout(0.2))
    net.add(layers.Flatten())
    # Dense head with heavy dropout.
    for units in (216, 108):
        net.add(layers.Dense(units, activation='relu'))
        net.add(layers.Dropout(0.5))
    # Output layer: a single sigmoid unit.
    net.add(layers.Dense(1, kernel_initializer='normal', activation='sigmoid'))
    # model.summary() intentionally omitted (was commented out).
    net.compile(loss='binary_crossentropy', optimizer=optimizers.Adam(lr=1e-3))
    return net
def _new_model(self):
    """Build a 3-level 3D U-Net (16->128 filters) with a sigmoid voxel head.

    Stores the result in ``self.model``. Assumes ``self.input_size`` is a
    channels-last volume shape with spatial dims divisible by 8 (required
    by the three 2x pooling steps) — TODO confirm against callers.
    """
    inputs = layers.Input(shape=self.input_size)
    # --- Encoder ---
    conv1 = layers.Conv3D(16, (3, 3, 3), activation='relu', padding='same')(inputs)
    conv1 = layers.Conv3D(16, (3, 3, 3), activation='relu', padding='same')(conv1)
    pool1 = layers.MaxPooling3D(pool_size=(2, 2, 2))(conv1)
    conv2 = layers.Conv3D(32, (3, 3, 3), activation='relu', padding='same')(pool1)
    conv2 = layers.Conv3D(32, (3, 3, 3), activation='relu', padding='same')(conv2)
    pool2 = layers.MaxPooling3D(pool_size=(2, 2, 2))(conv2)
    conv3 = layers.Conv3D(64, (3, 3, 3), activation='relu', padding='same')(pool2)
    conv3 = layers.Conv3D(64, (3, 3, 3), activation='relu', padding='same')(conv3)
    pool3 = layers.MaxPooling3D(pool_size=(2, 2, 2))(conv3)
    # --- Bottleneck ---
    conv4 = layers.Conv3D(128, (3, 3, 3), activation='relu', padding='same')(pool3)
    conv4 = layers.Conv3D(128, (3, 3, 3), activation='relu', padding='same')(conv4)
    # --- Decoder with skip connections ---
    up5 = layers.Conv3DTranspose(64, (2, 2, 2), strides=(2, 2, 2),
                                 padding='same')(conv4)
    conc5 = layers.concatenate([up5, conv3])
    # BUG FIX: this previously referenced `conc6` (not yet defined),
    # raising NameError; the intended skip tensor is `conc5`.
    conv5 = layers.Conv3D(64, (3, 3, 3), activation='relu', padding='same')(conc5)
    conv5 = layers.Conv3D(64, (3, 3, 3), activation='relu', padding='same')(conv5)
    up6 = layers.Conv3DTranspose(32, (2, 2, 2), strides=(2, 2, 2),
                                 padding='same')(conv5)
    conc6 = layers.concatenate([up6, conv2])
    conv6 = layers.Conv3D(32, (3, 3, 3), activation='relu', padding='same')(conc6)
    conv6 = layers.Conv3D(32, (3, 3, 3), activation='relu', padding='same')(conv6)
    up7 = layers.Conv3DTranspose(16, (2, 2, 2), strides=(2, 2, 2),
                                 padding='same')(conv6)
    conc7 = layers.concatenate([up7, conv1])
    conv7 = layers.Conv3D(16, (3, 3, 3), activation='relu', padding='same')(conc7)
    conv7 = layers.Conv3D(16, (3, 3, 3), activation='relu', padding='same')(conv7)
    outputs = layers.Conv3D(1, (1, 1, 1), activation='sigmoid')(conv7)
    self.model = Model(inputs=inputs, outputs=outputs)
def __init__(self, shape):
    """Four-stage regularized 3D conv net with a single-sigmoid head.

    Dropout rates (0.7 after Flatten, 0.6 between the dense layers) were
    tuned experimentally: higher rates kept the net from fitting the
    training set at all, lower rates overfit the training data without
    improving validation accuracy (which stayed in the low 60% range).
    """
    self.re_rate = 0.9  # l2 strength applied to every conv kernel
    self.model = models.Sequential()
    first = True
    # Conv/BN/pool stages doubling the filter count: 16 -> 32 -> 64 -> 128.
    for n_kernels in (16, 32, 64, 128):
        conv_kwargs = {
            'activation': 'relu',
            'kernel_regularizer': regularizers.l2(self.re_rate),
        }
        if first:
            conv_kwargs['input_shape'] = shape
            first = False
        self.model.add(layers.Conv3D(n_kernels, (3, 3, 3), **conv_kwargs))
        self.model.add(layers.BatchNormalization())
        self.model.add(layers.MaxPooling3D((2, 2, 2)))
    # Dense head.
    self.model.add(layers.Flatten())
    self.model.add(layers.Dropout(rate=0.7))
    self.model.add(layers.Dense(128, activation='relu'))
    self.model.add(layers.Dropout(rate=0.6))
    self.model.add(layers.Dense(64, activation='relu'))
    self.model.add(layers.Dense(1, activation='sigmoid'))
def __init__(self, shape):
    """Small encoder/decoder 3D conv net with a skip concat and a single
    sigmoid output.

    Args:
        shape: channels-last input volume shape.
    """
    self.re_rate = 0.9  # l2 strength for every conv kernel
    self.inputs = layers.Input(shape=shape)
    # Encoder: two conv/BN/pool stages (4 -> 8 filters).
    self.f_block = layers.Conv3D(4, (3, 3, 3), activation='relu',
                                 kernel_regularizer=regularizers.l2(self.re_rate),
                                 padding='same')(self.inputs)
    self.bn = layers.BatchNormalization()(self.f_block)
    self.mp1 = layers.MaxPooling3D((2, 2, 2))(self.bn)
    self.f_block1 = layers.Conv3D(8, (3, 3, 3), activation='relu',
                                  kernel_regularizer=regularizers.l2(self.re_rate),
                                  padding='same')(self.mp1)
    self.bn = layers.BatchNormalization()(self.f_block1)
    self.mp2 = layers.MaxPooling3D((2, 2, 2))(self.bn)
    self.f_block2 = layers.Conv3D(8, (3, 3, 3), activation='relu',
                                  kernel_regularizer=regularizers.l2(self.re_rate),
                                  padding='same')(self.mp2)
    self.f_block2 = layers.BatchNormalization()(self.f_block2)
    # NOTE(review): this conv/BN pair is overwritten two statements below
    # (the up-sampling path restarts from f_block2), so it never reaches
    # the output graph. Kept as-is to preserve behavior; likely dead code.
    self.b_back2 = layers.Conv3D(8, (3, 3, 3), activation='relu',
                                 kernel_regularizer=regularizers.l2(self.re_rate),
                                 padding='same')(self.f_block2)
    self.b_back2 = layers.BatchNormalization()(self.b_back2)
    # Decoder: upsample, conv, concatenate with the encoder feature map.
    self.b_back2 = layers.Conv3D(
        8, (3, 3, 3), activation='relu',
        kernel_regularizer=regularizers.l2(self.re_rate),
        padding='same')(layers.UpSampling3D((2, 2, 2))(self.f_block2))
    self.b_back2 = layers.BatchNormalization()(self.b_back2)
    self.cat2 = layers.concatenate([self.f_block1, self.b_back2])
    self.bn = layers.BatchNormalization()(self.cat2)
    self.b_back1 = layers.Conv3D(
        8, (3, 3, 3), activation='relu',
        kernel_regularizer=regularizers.l2(self.re_rate),
        padding='same')(layers.UpSampling3D((2, 2, 2))(self.bn))
    self.b_back1 = layers.BatchNormalization()(self.b_back1)
    self.gb = layers.GlobalAveragePooling3D()(self.b_back1)
    self.drop = layers.Dropout(rate=0.9)(self.gb)
    self.dense = layers.Dense(1, activation='sigmoid')(self.drop)
    # BUG FIX: keras.Model takes `inputs`/`outputs`; the legacy singular
    # `input=`/`output=` keywords fail on Keras 2+.
    self.model = keras.Model(inputs=[self.inputs], outputs=self.dense)
def vanilla_base_elu(input_tensor):
    """Plain conv/pool feature tower with ELU activations.

    Doubles the filter count each stage (32 -> 256) and returns the
    flattened feature vector.
    """
    t = input_tensor
    for n_filters in (32, 64, 128, 256):
        t = layers.Conv3D(n_filters, (3, 3, 3), activation='elu')(t)
        t = layers.MaxPooling3D((2, 2, 2))(t)
    return layers.Flatten()(t)
def CNN():
    """Build a 2-level 3D U-Net-style segmentation network.

    Accepts variable-size single-channel volumes; the output has one
    channel at input resolution (relu-activated).

    Returns:
        Model: the (uncompiled) network.
    """
    # FIX: removed unused locals num_channels / num_mask_channels.
    img_shape = (None, None, None, 1)
    inputs = Input(shape=img_shape)

    def _conv_block(tensor, filters):
        """Conv3D -> BatchNorm -> ReLU."""
        t = layers.Conv3D(filters, 3, padding='same')(tensor)
        t = layers.BatchNormalization()(t)
        return Activation('relu')(t)

    def _deconv_block(tensor, filters):
        """Conv3DTranspose -> BatchNorm -> ReLU."""
        t = layers.Conv3DTranspose(filters, 3, padding='same')(tensor)
        t = layers.BatchNormalization()(t)
        return Activation('relu')(t)

    # Encoder.
    conv1 = _conv_block(_conv_block(inputs, 32), 32)
    pool1 = layers.MaxPooling3D(pool_size=(2, 2, 2))(conv1)
    conv2 = _conv_block(_conv_block(pool1, 64), 64)
    pool2 = layers.MaxPooling3D(pool_size=(2, 2, 2))(conv2)

    # Bottleneck, upsampled back to the conv2 resolution.
    conv3 = _conv_block(_conv_block(pool2, 128), 128)
    conv3 = layers.UpSampling3D(size=(2, 2, 2))(conv3)

    # Decoder with skip concatenations.
    up4 = layers.concatenate([conv3, conv2])
    conv4 = _deconv_block(_deconv_block(up4, 64), 64)
    conv4 = layers.Conv3DTranspose(64, 1, padding='same')(conv4)
    conv4 = layers.UpSampling3D(size=(2, 2, 2))(conv4)
    up5 = layers.concatenate([conv4, conv1])
    conv5 = _deconv_block(_deconv_block(up5, 32), 32)
    conv5 = layers.Conv3DTranspose(1, 1, padding='same',
                                   activation='relu')(conv5)

    return Model(inputs=inputs, outputs=conv5)
def load_model():
    """ConvLSTM feature extractor followed by a bidirectional LSTM
    sequence classifier over N_CLASSES."""
    clip_input = Input(shape=(SequenceLength, IMSIZE[0], IMSIZE[1], 3))
    # Stacked ConvLSTM2D layers; MaxPooling3D with (1, 2, 2) shrinks only
    # H and W, leaving the time axis intact.
    feat = layers.ConvLSTM2D(32, kernel_size=(7, 7), padding='valid',
                             return_sequences=True)(clip_input)
    feat = layers.Activation('relu')(feat)
    feat = layers.MaxPooling3D(pool_size=(1, 2, 2))(feat)
    feat = layers.ConvLSTM2D(64, kernel_size=(5, 5), padding='valid',
                             return_sequences=True)(feat)
    feat = layers.MaxPooling3D(pool_size=(1, 2, 2))(feat)
    feat = layers.ConvLSTM2D(96, kernel_size=(3, 3), padding='valid',
                             return_sequences=True)(feat)
    feat = layers.Activation('relu')(feat)
    feat = layers.ConvLSTM2D(96, kernel_size=(3, 3), padding='valid',
                             return_sequences=True)(feat)
    feat = layers.Activation('relu')(feat)
    feat = layers.ConvLSTM2D(96, kernel_size=(3, 3), padding='valid',
                             return_sequences=True)(feat)
    feat = layers.MaxPooling3D(pool_size=(1, 2, 2))(feat)
    feat = layers.Dense(320)(feat)
    feat = layers.Activation('relu')(feat)
    feat = layers.Dropout(0.5)(feat)
    # Flatten each timestep's feature map into a vector for the LSTM.
    dims = feat.get_shape().as_list()
    feat = layers.Reshape(
        (SequenceLength, dims[2] * dims[3] * dims[4]))(feat)
    feat = layers.Bidirectional(layers.LSTM(64, return_sequences=True),
                                merge_mode='concat')(feat)
    feat = layers.Dropout(0.5)(feat)
    feat = layers.Flatten()(feat)
    feat = layers.Dense(128, activation='relu')(feat)
    preds = layers.Dense(N_CLASSES, activation='softmax')(feat)
    model = Model(clip_input, preds)
    model.compile(loss='categorical_crossentropy', optimizer='sgd',
                  metrics=['accuracy'])
    return model
def naive_inception(input_shape=(64, 64, 64, 2)):
    """Two naive-inception stages plus a conv trunk feeding eight
    per-property regression heads."""
    net_in = tensor_input(input_shape)
    # Stage 1: inception + reduction + conv/pool.
    stage = naive_inception_block(net_in, 16, 8, 16, 8, 16, 16)
    stage = basic_reduction_block(stage, 16, 32, 16, 24, 32)
    stage = layers.Conv3D(128, (3, 3, 3), activation='relu')(stage)
    stage = layers.MaxPooling3D((2, 2, 2))(stage)
    # Stage 2: wider inception + reduction.
    stage = naive_inception_block(stage, 32, 16, 32, 16, 32, 32)
    stage = basic_reduction_block(stage, 32, 64, 32, 50, 64)
    trunk = layers.Conv3D(512, (3, 3, 3), activation='relu')(stage)
    trunk = layers.MaxPooling3D((2, 2, 2))(trunk)
    trunk = layers.Conv3D(512, (2, 2, 2), activation='relu')(trunk)
    trunk = layers.Flatten()(trunk)
    trunk = layers.Dropout(0.2)(trunk)
    for units in (1024, 512, 128, 64):
        trunk = layers.Dense(units, activation='relu')(trunk)
    # One named head per target property.
    out_list = [head(trunk, prop)
                for prop in ('density', 'detvel', 'detpres', 'dipole',
                             'energy', 'hof', 'temp', 'gap')]
    model = models.Model(net_in, out_list)
    # `loss_weights` comes from module scope (not defined in this function).
    model.compile(optimizer='adam', loss=['mse'] * len(out_list),
                  loss_weights=loss_weights, metrics=['mae'])
    return model
def __init__(self, shape):
    """Encoder/decoder 3D conv net ending in global average pooling and a
    single sigmoid unit.

    Args:
        shape: channels-last input volume shape.
    """
    self.re_rate = 0.9  # l2 strength for every conv kernel
    dr = 0.9            # final dropout rate
    self.inputs = layers.Input(shape=shape)
    # Encoder: three conv/pool/BN stages (4 -> 16 -> 32 filters).
    self.f_block = layers.Conv3D(4, (3, 3, 3), activation='relu',
                                 kernel_regularizer=regularizers.l2(self.re_rate),
                                 padding='same')(self.inputs)
    self.mp1 = layers.MaxPooling3D((2, 2, 2))(self.f_block)
    self.bn1 = layers.BatchNormalization()(self.mp1)
    self.f_block1 = layers.Conv3D(16, (3, 3, 3), activation='relu',
                                  kernel_regularizer=regularizers.l2(self.re_rate),
                                  padding='same')(self.bn1)
    self.mp2 = layers.MaxPooling3D((2, 2, 2))(self.f_block1)
    self.bn2 = layers.BatchNormalization()(self.mp2)
    self.f_block2 = layers.Conv3D(32, (3, 3, 3), activation='relu',
                                  kernel_regularizer=regularizers.l2(self.re_rate),
                                  padding='same')(self.bn2)
    self.mp3 = layers.MaxPooling3D((2, 2, 2))(self.f_block2)
    self.bn3 = layers.BatchNormalization()(self.mp3)
    # Bottleneck (64 -> 128 filters).
    self.f_block3 = layers.Conv3D(64, (3, 3, 3), activation='relu',
                                  kernel_regularizer=regularizers.l2(self.re_rate),
                                  padding='same')(self.bn3)
    self.f_block3 = layers.BatchNormalization()(self.f_block3)
    self.b_back3 = layers.Conv3D(128, (3, 3, 3), activation='relu',
                                 kernel_regularizer=regularizers.l2(self.re_rate),
                                 padding='same')(self.f_block3)
    self.b_back3 = layers.BatchNormalization()(self.b_back3)
    # Decoder: two upsample/conv/BN stages (64 -> 32 filters).
    self.b_back2 = layers.Conv3D(
        64, (3, 3, 3), activation='relu',
        kernel_regularizer=regularizers.l2(self.re_rate),
        padding='same')(layers.UpSampling3D((2, 2, 2))(self.b_back3))
    self.b_back2 = layers.BatchNormalization()(self.b_back2)
    self.b_back1 = layers.Conv3D(
        32, (3, 3, 3), activation='relu',
        kernel_regularizer=regularizers.l2(self.re_rate),
        padding='same')(layers.UpSampling3D((2, 2, 2))(self.b_back2))
    self.b_back1 = layers.BatchNormalization()(self.b_back1)
    self.gb = layers.GlobalAveragePooling3D()(self.b_back1)
    self.dr = layers.Dropout(rate=dr)(self.gb)
    self.dense = layers.Dense(1, activation='sigmoid')(self.dr)
    # BUG FIX: keras.Model takes `inputs`/`outputs`; the legacy singular
    # `input=`/`output=` keywords fail on Keras 2+.
    self.model = keras.Model(inputs=self.inputs, outputs=self.dense)
def PrimaryCap(inputs, dim_capsule, n_channels, kernel_size, strides, padding):
    """
    Apply Conv2D `n_channels` times and concatenate all capsules
    :param inputs: 4D tensor, shape=[None, width, height, channels]
    :param dim_capsule: the dim of the output vector of capsule
    :param n_channels: the number of types of capsules
    :return: output tensor, shape=[None, num_capsule, dim_capsule]
    """
    # One wide conv producing dim_capsule * n_channels feature maps.
    conv_out = layers.Conv3D(filters=dim_capsule * n_channels,
                             kernel_size=kernel_size, strides=strides,
                             padding=padding,
                             name='primarycap_conv2d')(inputs)
    pooled = layers.MaxPooling3D(pool_size=(2, 3, 4), strides=None,
                                 padding='valid', data_format=None)(conv_out)
    normed = layers.BatchNormalization(epsilon=0.001, axis=-1, momentum=0.99,
                                       weights=None, beta_init='zero',
                                       gamma_init='one',
                                       gamma_regularizer=None,
                                       beta_regularizer=None)(pooled)
    # Regroup the feature maps into capsule vectors of length dim_capsule.
    capsules = layers.Reshape(target_shape=[-1, dim_capsule],
                              name='primarycap_reshape')(normed)
    capsules = layers.Dropout(0.5)(capsules)
    return layers.Lambda(squash, name='primarycap_squash')(capsules)
def DenseNet3D(blocks, input_shape=None, classes=1000):
    """Construct a 3D DenseNet.

    Args:
        blocks: per-dense-block layer counts.
        input_shape: channels-last input volume shape.
        classes: number of softmax output classes.

    Returns:
        Model: the (uncompiled) network.
    """
    channel_axis = 4  # channels-last 5D tensors
    img_input = layers.Input(shape=input_shape)

    # Stem: 7x7x7/2 conv followed by a 3x3x3/2 max-pool.
    x = layers.ZeroPadding3D(padding=((3, 3), (3, 3), (3, 3)))(img_input)
    x = layers.Conv3D(64, 7, strides=2, use_bias=False, name='conv1/conv')(x)
    x = layers.BatchNormalization(axis=channel_axis, epsilon=1.001e-5,
                                  name='conv1/bn')(x)
    x = layers.Activation('relu', name='conv1/relu')(x)
    x = layers.ZeroPadding3D(padding=((1, 1), (1, 1), (1, 1)))(x)
    x = layers.MaxPooling3D(3, strides=2, name='pool1')(x)

    # Dense blocks with 0.5-compression transitions between them.
    last = len(blocks) - 1
    for idx, n_layers in enumerate(blocks):
        x = dense_block3D(x, n_layers, name='conv' + str(idx + 2))
        if idx < last:
            x = transition_block3D(x, 0.5, name='pool' + str(idx + 2))

    # Classifier head.
    x = layers.BatchNormalization(axis=channel_axis, epsilon=1.001e-5,
                                  name='bn')(x)
    x = layers.GlobalAveragePooling3D(name='avg_pool')(x)
    x = layers.Dense(classes, activation='softmax', name='fc')(x)

    return models.Model(img_input, x, name='densenet3D')
def build_model(model_type='regression', conv_layer_sizes=(16, 16, 16),
                dense_layer_size=16, dropout_rate=0.5):
    """Build a small 3D convnet for regression or 3-class classification.

    Args:
        model_type: 'regression' (single linear output, MSE loss) or
            'classification' (3-way softmax, categorical crossentropy).
        conv_layer_sizes: filter count for each Conv3D layer.
        dense_layer_size: units in the penultimate dense layer.
        dropout_rate: dropout applied before and after the dense layer.

    Returns:
        Sequential: the compiled model. Exits the process on an invalid
        model_type (behavior preserved from the original).
    """
    if model_type not in ['regression', 'classification']:
        print('Requested model type {0} is invalid'.format(model_type))
        sys.exit(1)

    model = models.Sequential()
    model.add(layers.Conv3D(filters=conv_layer_sizes[0], kernel_size=(3, 3, 3),
                            input_shape=(16, 16, 16, 14)))
    model.add(layers.Activation(activation='relu'))
    for c in conv_layer_sizes[1:]:
        model.add(layers.Conv3D(filters=c, kernel_size=(3, 3, 3)))
        model.add(layers.Activation(activation='relu'))
    model.add(layers.MaxPooling3D(pool_size=(2, 2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dropout(rate=dropout_rate))
    model.add(layers.Dense(units=dense_layer_size, activation='relu'))
    model.add(layers.Dropout(rate=dropout_rate))

    # The last layer — and the loss — depend on the model type.
    if model_type == 'regression':
        model.add(layers.Dense(units=1))
        # BUG FIX: the regression model was compiled with
        # categorical_crossentropy + accuracy, which is meaningless for a
        # single linear output; use MSE/MAE instead.
        loss, metrics = 'mse', ['mae']
    else:
        model.add(layers.Dense(units=3, activation='softmax'))
        loss, metrics = 'categorical_crossentropy', ['accuracy']
    model.compile(loss=loss, optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=metrics)
    return model
def create_voxnet_model_homepage(input_shape, output_size):
    """Build the VoxNet variant published on the authors' homepage.

    See: http://dimatura.net/publications/3dcnn_lz_maturana_scherer_icra15.pdf
    Note: This is the latest model that the VoxNet-authors used.

    Args:
        input_shape (shape): Input-shape.
        output_size (int): Output-size.

    Returns:
        Model: A model.
    """
    # Trainable params: 916,834
    net = models.Sequential(name="VoxNetHomepage")
    # Append a trailing channel axis so Conv3D sees (D, H, W, 1).
    net.add(layers.Reshape(target_shape=input_shape + (1,),
                           input_shape=input_shape))
    net.add(layers.Conv3D(32, (5, 5, 5), strides=(2, 2, 2), activation="relu"))
    net.add(layers.Conv3D(32, (3, 3, 3), strides=(1, 1, 1), activation="relu"))
    net.add(layers.MaxPooling3D((2, 2, 2)))
    net.add(layers.Flatten())
    net.add(layers.Dense(128, activation="relu"))
    net.add(layers.Dense(output_size))
    return net
def MaxPooling3D(pool_size=(2, 2, 2), strides=None, padding='valid',
                 data_format='channels_first'):
    """Factory wrapping kl.MaxPooling3D with a channels-first default."""
    return kl.MaxPooling3D(pool_size=pool_size, strides=strides,
                           padding=padding, data_format=data_format)
def D3GenerateModel(n_filter=16, number_of_class=1, input_shape=(16, 144, 144, 1),
                    activation_last='softmax', metrics=None,
                    loss='categorical_crossentropy', dropout=0.05,
                    init='glorot_uniform', two_output=False, lr=1e-4):
    """3D CNN with SineReLU activations, asymmetric-kernel conv groups and
    a Conv2D/Conv1D classification head.

    Note: `number_of_class`, `activation_last`, `dropout` and `two_output`
    are accepted for interface compatibility but not used below.

    Args:
        n_filter: base filter count.
        metrics: Keras metrics; defaults to ['mse', 'acc', dice_coef,
            recall_at_thresholds, precision_at_thresholds].
        loss: loss passed to compile().
        init: kernel initializer for every conv layer.
        lr: learning rate for the Yogi optimizer. BUG FIX: `lr` was
            previously undefined at the optimizer call (NameError); it is
            now a trailing keyword parameter with a conventional default
            — TODO confirm 1e-4 matches the rate used in training runs.

    Returns:
        Model: the compiled model.
    """
    if metrics is None:
        # FIX: was a mutable default argument; also defers the lookup of
        # the custom metric callables to call time.
        metrics = ['mse', 'acc', dice_coef, recall_at_thresholds,
                   precision_at_thresholds]
    filter_size = n_filter
    input_x = layers.Input(shape=input_shape, name='Input_layer',
                           dtype='float32')
    # --- level 1 ---
    x = layers.Conv3D(filters=filter_size, kernel_size=(5, 5, 5),
                      strides=(1, 1, 1), kernel_initializer=init,
                      padding='same')(input_x)
    x = cyclical_learning_rate.SineReLU()(x)
    x = layers.Conv3D(filters=filter_size, kernel_size=(5, 5, 5),
                      strides=(1, 1, 1), padding='same',
                      kernel_initializer=init)(x)
    x = cyclical_learning_rate.SineReLU()(x)
    x = layers.MaxPooling3D(pool_size=(2, 2, 2), padding='same')(x)
    # --- level 2 ---
    conv_list = []
    counter = 0
    x = layers.Conv3D(filters=filter_size * 2, kernel_size=(3, 3, 3),
                      strides=(1, 1, 1), padding='same',
                      kernel_initializer=init)(x)
    x = cyclical_learning_rate.SineReLU()(x)
    x = layers.Conv3D(filters=filter_size * 2, kernel_size=(3, 3, 3),
                      strides=(1, 1, 1), padding='same',
                      kernel_initializer=init)(x)
    x = cyclical_learning_rate.SineReLU()(x)
    # Average-pool then immediately up-sample: smooths H/W activations
    # without changing the tensor shape.
    x = layers.AveragePooling3D(pool_size=(1, 2, 2), padding='same')(x)
    x = layers.UpSampling3D(size=(1, 2, 2))(x)
    # Three groups of asymmetric-kernel convs applied sequentially; the
    # output of each group is snapshotted and all three concatenated.
    for kernel_sizes in ([(1, 3, 3), (3, 3, 1)],
                         [(3, 3, 3), (3, 1, 3)],
                         [(3, 3, 1), (3, 3, 3), (1, 3, 3)]):
        for kernel_size in kernel_sizes:
            x = layers.Conv3D(filters=(filter_size * 4),
                              kernel_size=kernel_size,
                              kernel_initializer=init, strides=(1, 1, 1),
                              padding='same',
                              name='Conv3D_%s' % (counter))(x)
            x = layers.BatchNormalization()(x)
            x = cyclical_learning_rate.SineReLU()(x)
            counter = counter + 1
        conv_list.append(x)
    x = layers.concatenate(conv_list)
    x = layers.Conv3D(filters=filter_size * 8, kernel_size=(3, 3, 3),
                      strides=(2, 2, 2), kernel_initializer=init,
                      padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = cyclical_learning_rate.SineReLU()(x)
    # Collapse the volume into a sequence, then classify with a wide
    # Conv2D followed by a Conv1D over the channel axis.
    x = layers.Reshape(target_shape=[4, -1, filter_size * 8])(x)
    x = layers.Conv2D(filters=filter_size * 8, kernel_size=(1, 1296),
                      kernel_initializer=init, strides=(1, 1296))(x)
    x = layers.BatchNormalization()(x)
    x = cyclical_learning_rate.SineReLU()(x)
    x = layers.Reshape(target_shape=[filter_size * 8, -1])(x)
    x = layers.Conv1D(filters=2, kernel_size=filter_size * 8,
                      strides=filter_size * 8, kernel_initializer=init)(x)
    x = layers.Softmax()(x)
    y = layers.Flatten()(x)
    model = Model(inputs=input_x, outputs=y)
    import yogi  # local import, mirroring the original
    optimizer = yogi.Yogi(lr=lr)
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
    return model
def reduced_irn2_stem(input_tensor):
    """Truncated Inception-ResNet-v2 style stem: a conv stack followed by
    one pool/strided-conv split merged on the channel axis."""
    t = layers.Conv3D(32, (3, 3, 3), padding='valid', strides=1,
                      activation='relu')(input_tensor)
    t = layers.Conv3D(32, (3, 3, 3), padding='valid', activation='relu')(t)
    t = layers.Conv3D(64, (3, 3, 3), padding='same', activation='relu')(t)
    pool_branch = layers.MaxPooling3D((3, 3, 3), strides=2)(t)
    conv_branch = layers.Conv3D(96, (3, 3, 3), padding='valid', strides=2,
                                activation='relu')(t)
    return layers.concatenate([pool_branch, conv_branch], axis=-1)
def __init__(self, shape):
    """Minimal 3D conv classifier: one conv/pool stage, an MLP head and a
    5-way softmax output."""
    self.re_rate = 0.9  # l2 strength for the conv kernel
    net = models.Sequential()
    net.add(layers.Conv3D(16, (3, 3, 3), activation='relu',
                          input_shape=shape,
                          kernel_regularizer=regularizers.l2(self.re_rate)))
    net.add(layers.MaxPooling3D((6, 6, 6)))
    net.add(layers.Flatten())
    for units in (16, 16, 8):
        net.add(layers.Dense(units, activation='relu'))
    net.add(layers.Dense(5, activation='softmax'))
    self.model = net
def single_channel(self, input):
    """Per-channel feature tower: conv/pool stages, global average pooling
    and dropout; returns the pooled feature vector."""
    reg_rate = self.re_rate
    x = layers.Conv3D(4, (5, 6, 5), activation="relu",
                      kernel_regularizer=regularizers.l2(reg_rate))(input)
    x = layers.Conv3D(8, (3, 3, 3), activation='relu',
                      kernel_regularizer=regularizers.l2(reg_rate))(x)
    x = layers.MaxPooling3D((2, 2, 2))(x)
    x = layers.Conv3D(16, (3, 3, 3), activation='relu',
                      kernel_regularizer=regularizers.l2(reg_rate))(x)
    x = layers.Conv3D(16, (3, 3, 3), activation='relu',
                      kernel_regularizer=regularizers.l2(reg_rate))(x)
    x = layers.MaxPooling3D((2, 2, 2))(x)
    # Final convs use a flat (3, 1, 3) kernel.
    x = layers.Conv3D(16, (3, 1, 3), activation='relu',
                      kernel_regularizer=regularizers.l2(reg_rate))(x)
    x = layers.Conv3D(16, (3, 1, 3), activation='relu',
                      kernel_regularizer=regularizers.l2(reg_rate))(x)
    pooled = layers.GlobalAveragePooling3D()(x)
    return layers.Dropout(rate=self.dr)(pooled)
def __init__(self, shape):
    """Four conv/BN/ReLU/pool stages (16 -> 128 filters, shrinking
    kernels) followed by a small dense head with a 2-unit sigmoid output.

    Args:
        shape: channels-last input volume shape.
    """
    self.re_rate = 0.9  # l2 strength for every conv kernel
    self.model = models.Sequential()
    stage_specs = ((16, (5, 5, 5)), (32, (4, 4, 4)),
                   (64, (3, 3, 3)), (128, (3, 3, 3)))
    for idx, (filters, kernel) in enumerate(stage_specs):
        conv_kwargs = {'kernel_regularizer': regularizers.l2(self.re_rate)}
        # FIX: input_shape was redundantly passed to every conv layer;
        # Keras only honors it on the first layer of a Sequential model,
        # so it is now supplied only there.
        if idx == 0:
            conv_kwargs['input_shape'] = shape
        self.model.add(layers.Conv3D(filters, kernel, **conv_kwargs))
        self.model.add(layers.BatchNormalization())
        self.model.add(layers.ReLU())
        self.model.add(layers.MaxPooling3D((2, 2, 2)))
    self.model.add(layers.Flatten())
    self.model.add(layers.Dense(16))
    self.model.add(layers.Dense(8))
    self.model.add(layers.Dropout(0.5))
    self.model.add(layers.Dense(2, activation='sigmoid'))
def irn2_ra(input_tensor):
    """Reduction-A style block: merges a max-pool branch, a strided conv
    branch and a deeper factorized conv branch."""
    activated = layers.Activation('relu')(input_tensor)
    pool_branch = layers.MaxPooling3D((3, 3, 3), padding='valid',
                                      strides=2)(activated)
    conv_branch = layers.Conv3D(192, (3, 3, 3), padding='valid', strides=2,
                                activation='relu')(activated)
    deep_branch = layers.Conv3D(128, (1, 1, 1), padding='same',
                                activation='relu')(activated)
    deep_branch = layers.Conv3D(128, (3, 3, 3), padding='same',
                                activation='relu')(deep_branch)
    deep_branch = layers.Conv3D(192, (3, 3, 3), padding='valid', strides=2,
                                activation='relu')(deep_branch)
    return layers.concatenate([pool_branch, conv_branch, deep_branch])