def conv_block(inp, channels, output_name, block_name='Block', dropout=0.2,
               depth=2, kernel_size=(3, 3, 3), activation='relu',
               sSE=False, cSE=False, scSE=False, bn=False):
    # NOTE: depth and bn are accepted for API compatibility but unused here.
    with K.name_scope(block_name):
        c_1 = Conv3D(channels[0], kernel_size, activation='relu',
                     kernel_initializer='glorot_uniform', padding='same')(inp)
        c_1 = Dropout(dropout)(c_1)
        c_1 = concatenate([inp, c_1])
        if scSE or (sSE and cSE):
            c_2 = Conv3D(channels[1], kernel_size, activation=activation,
                         kernel_initializer='glorot_uniform', padding='same')(c_1)
            # cSE: channel squeeze-and-excitation
            cse = GlobalAveragePooling3D()(c_2)
            cse = Dense(c_2.shape[-1] // 2, activation='relu')(cse)
            cse = Dense(c_2.shape[-1], activation='sigmoid')(cse)
            c_2_cse = Multiply()([c_2, cse])
            # sSE: spatial squeeze-and-excitation
            sse = Conv3D(1, (1, 1, 1), activation='sigmoid',
                         kernel_initializer='glorot_uniform')(c_2)
            c_2_sse = Multiply()([c_2, sse])
            return Add(name=output_name)([c_2_cse, c_2_sse])
        elif cSE:
            c_2 = Conv3D(channels[1], kernel_size, activation=activation,
                         kernel_initializer='glorot_uniform', padding='same')(c_1)
            cse = GlobalAveragePooling3D()(c_2)
            cse = Dense(c_2.shape[-1] // 2, activation='relu')(cse)
            cse = Dense(c_2.shape[-1], activation='sigmoid')(cse)
            return Multiply(name=output_name)([c_2, cse])
        elif sSE:
            c_2 = Conv3D(channels[1], kernel_size, activation=activation,
                         kernel_initializer='glorot_uniform', padding='same')(c_1)
            sse = Conv3D(1, (1, 1, 1), activation='sigmoid',
                         kernel_initializer='glorot_uniform')(c_2)
            return Multiply(name=output_name)([c_2, sse])
        else:
            return Conv3D(channels[1], kernel_size, activation=activation,
                          kernel_initializer='glorot_uniform', padding='same',
                          name=output_name)(c_1)
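# Hedged usage sketch (added for illustration; not part of the original code):
# wires one scSE conv_block into a tiny functional model. The input shape and
# channel counts are assumptions; the imports mirror what conv_block already
# relies on (tensorflow.keras layers plus the backend alias K).
from tensorflow.keras import Model
from tensorflow.keras.layers import Input

def _conv_block_demo():
    inp = Input(shape=(16, 16, 16, 1))
    out = conv_block(inp, channels=[8, 16], output_name='block1_out', scSE=True)
    return Model(inp, out)  # output carries both channel and spatial gating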
def make_global_pool_layer(self, layer):
    if self.model_config is not None and "global_pool" in self.model_config:
        layer_setting = self.model_config["global_pool"]
        if layer_setting == "max":
            layer = GlobalMaxPool3D()(layer)
        else:
            # "average" and any unrecognized value fall back to average pooling
            layer = GlobalAveragePooling3D()(layer)
    else:
        layer = GlobalAveragePooling3D()(layer)
    return layer
def TriangleModel(init_shape, feature_shape, feature_model,
                  block_PD_to_T2, block_T2_to_T1, block_T1_to_PD,
                  reconstruction_model, n_layers):
    inputT1 = Input(shape=init_shape)
    inputT2 = Input(shape=init_shape)
    inputPD = Input(shape=init_shape)

    fT1 = feature_model(inputT1)
    fT2 = feature_model(inputT2)
    fPD = feature_model(inputPD)

    forward_PD_to_T2 = build_forward_model(init_shape=feature_shape,
                                           block_model=block_PD_to_T2,
                                           n_layers=n_layers)
    forward_T2_to_T1 = build_forward_model(init_shape=feature_shape,
                                           block_model=block_T2_to_T1,
                                           n_layers=n_layers)
    forward_T1_to_PD = build_forward_model(init_shape=feature_shape,
                                           block_model=block_T1_to_PD,
                                           n_layers=n_layers)

    predT2 = reconstruction_model(forward_PD_to_T2(fPD))
    predT1 = reconstruction_model(forward_T2_to_T1(fT2))
    predPD = reconstruction_model(forward_T1_to_PD(fT1))

    # Mean absolute reconstruction error per contrast, reduced to a scalar
    errT2 = Subtract()([inputT2, predT2])
    errT2 = Lambda(lambda x: K.abs(x))(errT2)
    errT2 = GlobalAveragePooling3D()(errT2)
    errT2 = Reshape((1,))(errT2)

    errT1 = Subtract()([inputT1, predT1])
    errT1 = Lambda(lambda x: K.abs(x))(errT1)
    errT1 = GlobalAveragePooling3D()(errT1)
    errT1 = Reshape((1,))(errT1)

    errPD = Subtract()([inputPD, predPD])
    errPD = Lambda(lambda x: K.abs(x))(errPD)
    errPD = GlobalAveragePooling3D()(errPD)
    errPD = Reshape((1,))(errPD)

    errsum = Add()([errT2, errT1, errPD])

    model = Model(inputs=[inputT1, inputT2, inputPD], outputs=errsum)
    return model
def SE_block(input, ratio=8):
    # Squeeze: global average over the spatial dimensions
    x1 = GlobalAveragePooling3D()(input)
    # Excite: bottleneck MLP producing per-channel gates in (0, 1)
    x2 = Dense(x1.shape[-1] // ratio, activation='relu')(x1)
    x3 = Dense(x1.shape[-1], activation='sigmoid')(x2)
    # Broadcast the gates back over the spatial dimensions
    x4 = Reshape((1, 1, 1, x1.shape[-1]))(x3)
    result = multiply([input, x4])
    return result
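# Hedged usage sketch (illustrative, not from the original source): applies
# SE_block after a single Conv3D. The shape and ratio are assumptions; Input,
# Conv3D, and Model come from tensorflow.keras as in the rest of this file.
from tensorflow.keras import Model
from tensorflow.keras.layers import Input, Conv3D

def _se_block_demo():
    inp = Input(shape=(16, 16, 16, 1))
    feat = Conv3D(16, (3, 3, 3), padding='same', activation='relu')(inp)
    feat = SE_block(feat, ratio=8)  # 16 channels -> bottleneck of 2 -> 16 gates
    return Model(inp, feat)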
def model_4(input_shape):
    inputs = Input(input_shape)

    x = Conv3D(filters=32, kernel_size=3, activation='relu')(inputs)
    x = Conv3D(filters=32, kernel_size=3, activation='relu')(x)
    x = MaxPooling3D(pool_size=2)(x)

    x = Conv3D(filters=64, kernel_size=3, activation='relu')(x)
    x = Conv3D(filters=64, kernel_size=3, activation='relu')(x)
    x = MaxPooling3D(pool_size=2)(x)
    x = BatchNormalization(momentum=0.9)(x)

    x = Conv3D(filters=128, kernel_size=3, activation='relu')(x)
    x = Conv3D(filters=128, kernel_size=3, activation='relu')(x)
    x = MaxPooling3D(pool_size=2)(x)
    x = BatchNormalization(momentum=0.9)(x)

    x = Conv3D(filters=256, kernel_size=3, activation='relu')(x)
    x = Conv3D(filters=256, kernel_size=3, activation='relu')(x)
    x = Conv3D(filters=256, kernel_size=3, activation='relu')(x)

    x = GlobalAveragePooling3D()(x)
    x = Dropout(rate=0.2)(x)
    x = Dense(units=128, activation='relu')(x)
    x = Dropout(rate=0.2)(x)
    x = Dense(units=128, activation='relu')(x)
    outputs = Dense(units=3, activation="softmax")(x)

    model = tf.keras.Model(inputs, outputs, name="model_4_mri")
    return model
def global_average_pool(x: Tensor) -> Tensor:
    if len(x.shape) == 3:
        return GlobalAveragePooling1D()(x)
    elif len(x.shape) == 4:
        return GlobalAveragePooling2D()(x)
    elif len(x.shape) == 5:
        return GlobalAveragePooling3D()(x)
    raise ValueError(f"Expected a rank 3, 4, or 5 tensor, got shape {x.shape}")
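# Hedged sketch (illustrative): exercises the rank dispatch above on random
# TensorFlow tensors. Shapes are arbitrary; both calls reduce to (batch, channels).
import tensorflow as tf

def _global_average_pool_demo():
    x2d = tf.random.normal((2, 8, 8, 4))     # rank 4 -> GlobalAveragePooling2D
    x3d = tf.random.normal((2, 8, 8, 8, 4))  # rank 5 -> GlobalAveragePooling3D
    assert global_average_pool(x2d).shape == (2, 4)
    assert global_average_pool(x3d).shape == (2, 4)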
def train_model():
    training_X, training_Y = getAllTrainingData()

    # Load the pretrained base model here.
    base_model = load_models('I3D_model/Original_model/112_dims')
    # Alternatives tried previously:
    # base_model = ResNet50(weights='imagenet', include_top=False, input_shape=(224, 224, 3), classes=2)
    # base_model = Xception(weights='imagenet', include_top=False)

    x = base_model.output
    x = GlobalAveragePooling3D()(x)
    x = BatchNormalization()(x)
    x = Dropout(0.4)(x)
    predictions = Dense(1, activation='sigmoid')(x)

    model = Model(inputs=base_model.input, outputs=predictions)
    model.compile(optimizer='rmsprop', loss='binary_crossentropy',
                  metrics=['accuracy'])
    model.fit(training_X, training_Y, batch_size=batchsize, epochs=16,
              validation_split=0.1, shuffle=True)

    # Save model
    save_model(model)
    return model
def DenseNet(x, blocks, name):
    weight_decay = 1e-4
    eps = 1.1e-5

    # Initial layer
    x = Conv3D(64, kernel_size=(5, 5, 5), padding='same', use_bias=False,
               name=name + 'conv1/conv')(x)
    x = BatchNormalization(axis=4, momentum=0.8, epsilon=eps,
                           gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay),
                           name=name + 'conv1/bn')(x)
    x = LeakyReLU(alpha=0.3, name=name + 'conv1/relu')(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=2, name=name + 'pool1')(x)

    x = dense_block(x, blocks[0], name=name + 'conv2')
    x = transition_block(x, 0.5, name=name + 'pool2')
    x = dense_block(x, blocks[1], name=name + 'conv3')
    x = transition_block(x, 0.5, name=name + 'pool3')
    x = dense_block(x, blocks[2], name=name + 'conv4')
    x = transition_block(x, 0.5, name=name + 'pool4')
    x = dense_block(x, blocks[3], name=name + 'conv5')

    x = GlobalAveragePooling3D(name=name + 'avg_pool')(x)
    return x
def Fast_body(x, layers, block):
    fast_inplanes = 8
    lateral = []

    x = Conv_BN_ReLU(8, kernel_size=(5, 7, 7), strides=(1, 2, 2))(x)
    # x = Dropout(0.5)(x)
    x = MaxPool3D(pool_size=(1, 3, 3), strides=(1, 2, 2), padding='same')(x)

    lateral_p1 = Conv3D(8 * 2, kernel_size=(5, 1, 1), strides=(8, 1, 1),
                        padding='same', use_bias=False)(x)
    lateral.append(lateral_p1)

    x, fast_inplanes = make_layer_fast(x, block, 8, layers[0], head_conv=3,
                                       fast_inplanes=fast_inplanes)
    lateral_res2 = Conv3D(32 * 2, kernel_size=(5, 1, 1), strides=(8, 1, 1),
                          padding='same', use_bias=False)(x)
    lateral.append(lateral_res2)

    x, fast_inplanes = make_layer_fast(x, block, 16, layers[1], stride=2,
                                       head_conv=3, fast_inplanes=fast_inplanes)
    lateral_res3 = Conv3D(64 * 2, kernel_size=(5, 1, 1), strides=(8, 1, 1),
                          padding='same', use_bias=False)(x)
    lateral.append(lateral_res3)

    x, fast_inplanes = make_layer_fast(x, block, 32, layers[2], stride=2,
                                       head_conv=3, fast_inplanes=fast_inplanes)
    lateral_res4 = Conv3D(128 * 2, kernel_size=(5, 1, 1), strides=(8, 1, 1),
                          padding='same', use_bias=False)(x)
    lateral.append(lateral_res4)

    x, fast_inplanes = make_layer_fast(x, block, 64, layers[3], stride=2,
                                       head_conv=3, fast_inplanes=fast_inplanes)

    x = GlobalAveragePooling3D()(x)
    return x, lateral
def __init__(self, num_class=2, weight_decay=0.005, drop_rate=0.):
    super(DenseNet_3D_Model, self).__init__()
    # stage 1
    self.cbl_1 = CBL(filters=64)
    self.maxpool3d_1 = MaxPool3D((2, 2, 1), strides=(2, 2, 1), padding='same')
    # stage 2
    self.denseblock_2 = DenseBlock(growth_rate=32, internal_layers=4,
                                   drop_rate=drop_rate)
    self.maxpool3d_2 = MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='same')
    self.cbl_2 = CBL(filters=128, kernel=(1, 1, 1), drop_rate=drop_rate)
    # stage 3
    self.denseblock_3 = DenseBlock(growth_rate=32, internal_layers=4,
                                   drop_rate=drop_rate)
    self.maxpool3d_3 = MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='same')
    self.cbl_3 = CBL(filters=128, kernel=(1, 1, 1), drop_rate=drop_rate)
    # stage 4
    self.denseblock_4 = DenseBlock(growth_rate=64, internal_layers=4,
                                   drop_rate=drop_rate)
    self.maxpool3d_4 = MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='same')
    self.cbl_4 = CBL(filters=256, kernel=(1, 1, 1), drop_rate=drop_rate)
    # stage 5
    self.denseblock_5 = DenseBlock(growth_rate=64, internal_layers=4,
                                   drop_rate=drop_rate)
    self.cbl_5 = CBL(filters=256, kernel=(1, 1, 1), drop_rate=drop_rate)
    # classifier head
    self.globalavgpool3d = GlobalAveragePooling3D()
    self.dense = Dense(num_class, activation='softmax',
                       kernel_regularizer=l2(weight_decay),
                       bias_regularizer=l2(weight_decay))
def ca(input_tensor, filters, reduce=16):
    x = GlobalAveragePooling3D()(input_tensor)
    x = Reshape((1, 1, 1, filters))(x)
    # Dense units must be an integer, so use floor division for the reduction
    x = Dense(filters // reduce, activation='relu',
              kernel_initializer='he_normal', use_bias=False)(x)
    x = Dense(filters, activation='sigmoid',
              kernel_initializer='he_normal', use_bias=False)(x)
    x = Multiply()([x, input_tensor])
    return x
def squeeze_excite_block(input, ratio=16):
    '''Create a channel-wise squeeze-excite block.

    Args:
        input: input tensor
        ratio: reduction ratio of the bottleneck Dense layer

    Returns:
        a Keras tensor

    References
        - [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    '''
    init = input
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    filters = init.shape[channel_axis]
    se_shape = (1, 1, 1, filters)

    se = GlobalAveragePooling3D()(init)
    se = Reshape(se_shape)(se)
    se = Dense(filters // ratio, activation='relu',
               kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid',
               kernel_initializer='he_normal', use_bias=False)(se)

    if K.image_data_format() == 'channels_first':
        se = Permute((4, 1, 2, 3))(se)

    x = multiply([init, se])
    return x
def conv3d(layer_input, filters, axis=-1, se_res_block=True, se_ratio=16,
           down_sizing=True):
    if down_sizing:
        layer_input = MaxPooling3D(pool_size=(2, 2, 2))(layer_input)

    d = Conv3D(filters, (3, 3, 3), use_bias=False, padding='same')(layer_input)
    d = InstanceNormalization(axis=axis)(d)
    d = LeakyReLU(alpha=0.3)(d)
    d = Conv3D(filters, (3, 3, 3), use_bias=False, padding='same')(d)
    d = InstanceNormalization(axis=axis)(d)

    if se_res_block:
        se = GlobalAveragePooling3D()(d)
        se = Dense(filters // se_ratio, activation='relu')(se)
        se = Dense(filters, activation='sigmoid')(se)
        se = Reshape([1, 1, 1, filters])(se)
        d = Multiply()([d, se])
        shortcut = Conv3D(filters, (3, 3, 3), use_bias=False,
                          padding='same')(layer_input)
        shortcut = InstanceNormalization(axis=axis)(shortcut)
        d = add([d, shortcut])

    d = LeakyReLU(alpha=0.3)(d)
    return d
def build_model(
    shape=(50, 50, 20, 2),
    network_depth="18-layer",
    optimizer=None,
    learning_rate=0.0005,  # higher rates don't always converge
    loss="categorical_crossentropy",
    metrics=["accuracy"],
    number_classes=2,
    axis=3,
    starting_features=64,
    classification_activation="softmax",
):
    """
    :param shape:
    :param network_depth:
    :param optimizer:
    :param learning_rate:
    :param loss:
    :param metrics:
    :param number_classes:
    :param int axis: Default: 3. Assumes channels are last.
    :param starting_features: doubled after each set of residual units
    :param classification_activation:
    :return:
    """
    blocks, bottleneck = get_resnet_blocks_and_bottleneck(network_depth)

    inputs = Input(shape)
    x = non_residual_block(inputs, starting_features, axis=axis)

    features = starting_features
    for resnet_unit_id, iterations in enumerate(blocks):
        for block_id in range(iterations):
            x = residual_block(
                features,
                resnet_unit_id,
                block_id,
                bottleneck=bottleneck,
                axis=axis,
            )(x)
        features *= 2

    x = GlobalAveragePooling3D(name="final_average_pool")(x)
    x = Dense(
        number_classes,
        activation=classification_activation,
        name="fully_connected",
    )(x)

    # Instantiate the model.
    model = Model(inputs=inputs, outputs=x)

    if optimizer is None:
        optimizer = Adam(learning_rate=learning_rate)
    model.compile(optimizer, loss=loss, metrics=metrics)
    return model
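# Hedged usage sketch (illustrative): builds the classifier above with its
# documented defaults and prints a summary. Assumes the helpers referenced by
# build_model (get_resnet_blocks_and_bottleneck, non_residual_block,
# residual_block) are defined elsewhere in this module.
def _build_model_demo():
    model = build_model(shape=(50, 50, 20, 2), network_depth="18-layer")
    model.summary()
    return model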
def build_model_resnet50(input_shape, Nlabels=6, filters=16, convsize=3,
                         convsize2=5, poolsize=2, hidden_size=256,
                         flag_print=True):
    """Build a ResNet50-style network with either 2D or 3D convolutions."""
    if not Nlabels:
        global target_name
        Nlabels = len(np.unique(target_name)) + 1
    global Flag_CNN_Model, Flag_Simple_RESNET

    input0 = Input(shape=input_shape)
    x = conv(input0, filters, convsize2, Flag_CNN_Model, strides=2)
    x = bn(x)
    x = Activation('relu')(x)

    # Use either max pooling or image resampling: pool only when img_resize
    # is not defined at module level.
    try:
        img_resize
    except NameError:
        if Flag_CNN_Model == '2d':
            x = MaxPooling2D(poolsize, padding='same')(x)
        elif Flag_CNN_Model == '3d':
            x = MaxPooling3D(poolsize, padding='same')(x)

    x = conv_block(x, [filters, filters, filters * 2], convsize, strides=1,
                   flag_2d3d=Flag_CNN_Model)
    x = identity_block(x, [filters, filters, filters * 2], convsize,
                       flag_2d3d=Flag_CNN_Model)
    x = identity_block(x, [filters, filters, filters * 2], convsize,
                       flag_2d3d=Flag_CNN_Model)

    filters *= 2
    x = conv_block(x, [filters, filters, filters * 2], convsize, strides=2,
                   flag_2d3d=Flag_CNN_Model)
    x = identity_block(x, [filters, filters, filters * 2], convsize,
                       flag_2d3d=Flag_CNN_Model)
    x = identity_block(x, [filters, filters, filters * 2], convsize,
                       flag_2d3d=Flag_CNN_Model)
    x = identity_block(x, [filters, filters, filters * 2], convsize,
                       flag_2d3d=Flag_CNN_Model)

    if not Flag_Simple_RESNET:
        filters *= 2
        x = conv_block(x, [filters, filters, filters * 2], convsize, strides=2,
                       flag_2d3d=Flag_CNN_Model)
        x = identity_block(x, [filters, filters, filters * 2], convsize,
                           flag_2d3d=Flag_CNN_Model)
        x = identity_block(x, [filters, filters, filters * 2], convsize,
                           flag_2d3d=Flag_CNN_Model)
        x = identity_block(x, [filters, filters, filters * 2], convsize,
                           flag_2d3d=Flag_CNN_Model)
        x = identity_block(x, [filters, filters, filters * 2], convsize,
                           flag_2d3d=Flag_CNN_Model)
        x = identity_block(x, [filters, filters, filters * 2], convsize,
                           flag_2d3d=Flag_CNN_Model)

    filters *= 2
    x = conv_block(x, [filters, filters, filters * 2], convsize, strides=2,
                   flag_2d3d=Flag_CNN_Model)
    x = identity_block(x, [filters, filters, filters * 2], convsize,
                       flag_2d3d=Flag_CNN_Model)
    x = identity_block(x, [filters, filters, filters * 2], convsize,
                       flag_2d3d=Flag_CNN_Model)

    if Flag_CNN_Model == '2d':
        x = GlobalAveragePooling2D(name='avg_pool')(x)
    elif Flag_CNN_Model == '3d':
        x = GlobalAveragePooling3D(name='avg_pool')(x)

    out = Dense(Nlabels, activation='softmax')(x)
    model = tf.keras.models.Model(input0, out)
    if flag_print:
        model.summary()
    return model
def build(input_shape, num_outputs, block_fn, repetitions, reg_factor):
    """Builds a custom ResNet-like architecture.

    Args:
        input_shape: The input shape as 4 dimensions, e.g.
            (conv_dim1, conv_dim2, conv_dim3, channels) for channels-last
        num_outputs: The number of outputs at the final softmax layer
        block_fn: The block function to use. This is either `basic_block` or
            `bottleneck`. The original paper used basic_block for layers < 50
        repetitions: Number of repetitions of the various block units.
            At each block unit, the number of filters is doubled and the
            input size is halved

    Returns:
        The keras `Model`.
    """
    _handle_data_format()
    if len(input_shape) != 4:
        raise ValueError("Input should have 4 dimensions")

    # Load function from str if needed.
    block_fn = _get_block(block_fn)

    input = Input(shape=input_shape)
    conv1 = _conv_bn_relu(filters=64, kernel_size=(7, 7, 7),
                          strides=(2, 2, 2),
                          kernel_regularizer=l2(reg_factor))(input)
    pool1 = MaxPooling3D(pool_size=(3, 3, 3), strides=(2, 2, 2),
                         padding="same")(conv1)

    block = pool1
    filters = 64
    for i, r in enumerate(repetitions):
        block = _residual_block(block_fn, filters=filters,
                                kernel_regularizer=l2(reg_factor),
                                repetitions=r,
                                is_first_layer=(i == 0))(block)
        filters *= 2

    # Last activation
    block = _bn_relu(block)

    # Classifier block
    pool2 = GlobalAveragePooling3D()(block)
    if num_outputs > 1:
        dense = Dense(units=num_outputs, kernel_initializer="he_normal",
                      activation="softmax",
                      kernel_regularizer=l2(reg_factor))(pool2)
    else:
        dense = Dense(units=num_outputs, kernel_initializer="he_normal",
                      activation="sigmoid",
                      kernel_regularizer=l2(reg_factor))(pool2)

    model = tf.keras.Model(inputs=input, outputs=dense)
    return model
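# Hedged usage sketch (illustrative): one plausible call matching the
# signature above. The shape, block name, repetitions, and regularization
# factor are assumptions; repetitions=[2, 2, 2, 2] gives a ResNet-18-style
# layout.
def _build_demo():
    return build(input_shape=(64, 64, 64, 1), num_outputs=2,
                 block_fn="basic_block", repetitions=[2, 2, 2, 2],
                 reg_factor=1e-4)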
def deconv3d(layer_input, skip_input, filters, axis=-1, se_res_block=True,
             se_ratio=16, atten_gate=False):
    if atten_gate:
        gating = Conv3D(filters, (1, 1, 1), use_bias=False,
                        padding='same')(layer_input)
        gating = InstanceNormalization(axis=axis)(gating)
        attention = Conv3D(filters, (2, 2, 2), strides=(2, 2, 2),
                           use_bias=False, padding='valid')(skip_input)
        attention = InstanceNormalization(axis=axis)(attention)
        attention = add([gating, attention])
        attention = Conv3D(1, (1, 1, 1), use_bias=False, padding='same',
                           activation='sigmoid')(attention)
        # resize_by_axis via Lambda fails when a None dimension is fed:
        # attention = Lambda(resize_by_axis, arguments={'dim_1': skip_input.get_shape().as_list()[1], 'dim_2': skip_input.get_shape().as_list()[2], 'ax': 3})(attention)
        # attention = Lambda(resize_by_axis, arguments={'dim_1': skip_input.get_shape().as_list()[1], 'dim_2': skip_input.get_shape().as_list()[3], 'ax': 2})(attention)
        attention = ZeroPadding3D(((0, 1), (0, 1), (0, 1)))(attention)
        attention = UpSampling3D((2, 2, 2))(attention)
        attention = CropToConcat3D(mode='crop')([attention, skip_input])
        attention = Lambda(lambda x: K.tile(x, [1, 1, 1, 1, filters]))(attention)
        skip_input = multiply([skip_input, attention])

    u1 = ZeroPadding3D(((0, 1), (0, 1), (0, 1)))(layer_input)
    u1 = Conv3DTranspose(filters, (2, 2, 2), strides=(2, 2, 2),
                         use_bias=False, padding='same')(u1)
    u1 = InstanceNormalization(axis=axis)(u1)
    u1 = LeakyReLU(alpha=0.3)(u1)
    u1 = CropToConcat3D()([u1, skip_input])

    u2 = Conv3D(filters, (3, 3, 3), use_bias=False, padding='same')(u1)
    u2 = InstanceNormalization(axis=axis)(u2)
    u2 = LeakyReLU(alpha=0.3)(u2)
    u2 = Conv3D(filters, (3, 3, 3), use_bias=False, padding='same')(u2)
    u2 = InstanceNormalization(axis=axis)(u2)

    if se_res_block:
        se = GlobalAveragePooling3D()(u2)
        se = Dense(filters // se_ratio, activation='relu')(se)
        se = Dense(filters, activation='sigmoid')(se)
        se = Reshape([1, 1, 1, filters])(se)
        u2 = Multiply()([u2, se])
        shortcut = Conv3D(filters, (3, 3, 3), use_bias=False,
                          padding='same')(u1)
        shortcut = InstanceNormalization(axis=axis)(shortcut)
        u2 = add([u2, shortcut])

    u2 = LeakyReLU(alpha=0.3)(u2)
    return u2
def _se_block(self, inputs, block_input, filters, se_ratio=16):
    se = GlobalAveragePooling3D()(inputs)
    se = Dense(filters // se_ratio, activation='relu')(se)
    se = Dense(filters, activation='sigmoid')(se)
    se = Reshape([1, 1, 1, filters])(se)
    x = Multiply()([inputs, se])
    shortcut = Conv3D(filters, (3, 3, 3), use_bias=False,
                      padding='same')(block_input)
    shortcut = self._norm(shortcut)
    x = Add()([x, shortcut])
    return x
def myDenseNetv2Dropout(input_shape, dropout_rate=0.3):
    """DenseNet-style 3D classifier with spatial dropout between blocks."""
    bn_axis = -1 if K.image_data_format() == 'channels_last' else 1
    input_tensor = Input(shape=input_shape)  # e.g. 48, 240, 360, 1

    x = Conv3D(16, (3, 3, 3), strides=(1, 2, 2), use_bias=False,
               padding='same', name='block0_conv1')(input_tensor)  # [48, 120, 180]
    x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name='block0_bn1')(x)
    x = Activation('relu', name='block0_relu1')(x)
    x = Conv3D(16, (3, 3, 3), strides=(1, 1, 1), use_bias=False,
               padding='same', name='block0_conv2')(x)  # [48, 120, 180]

    x = _denseBlock(x, [16, 16], 'block_11')  # [48, 120, 180]
    x = _transit_block(x, 16, 'block13')      # [48, 120, 180]
    x = SpatialDropout3D(dropout_rate)(x)
    x = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)  # [24, 60, 90]

    x = _denseBlock(x, [24, 24, 24], 'block_21')  # [24, 60, 90]
    x = _transit_block(x, 24, 'block23')          # [24, 60, 90]
    x = SpatialDropout3D(dropout_rate)(x)
    x = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)  # [12, 30, 45]

    x = _denseBlock(x, [32, 32, 32, 32], 'block_31')  # [12, 30, 45]
    x = SpatialDropout3D(dropout_rate)(x)
    x = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)  # [6, 15, 23]

    x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name='block_final_bn')(x)
    x = Activation('relu', name='block_final_relu')(x)
    # Layers above form the base network; the EGFR-specific head follows.

    x = _denseBlock(x, [32, 32], 'EGFR_block_11')
    x = MaxPooling3D((1, 2, 2), strides=(1, 2, 2), padding='same')(x)  # [6, 8, 12, 64]
    x = SpatialDropout3D(dropout_rate)(x)
    x = _transit_block(x, 64, 'EGFR_block_12')  # [6, 8, 12, 64]
    x = GlobalAveragePooling3D()(x)
    x = Dense(1, activation='sigmoid', name='EGFR_global_pred')(x)

    # Create model
    model = Model(input_tensor, x, name='myDense')
    plot_model(model, 'myDenseNetv2.png', show_shapes=True)
    return model
def build_cnn_model_test(input_shape, Nlabels=6, filters=16, convsize=3,
                         convsize2=5, poolsize=2, hidden_size=256,
                         conv_layers=4, flag_print=True):
    if not Nlabels:
        global target_name
        Nlabels = len(np.unique(target_name)) + 1
    global Flag_CNN_Model

    input_tensor = Input(shape=input_shape)

    # Quickly reduce the image dimensions first.
    x = conv(input_tensor, filters, convsize2, Flag_CNN_Model, strides=2)
    x = bn(x)
    x = Activation('relu')(x)

    for li in range(conv_layers - 1):
        x = conv(x, filters, convsize, Flag_CNN_Model)
        x = bn(x)
        x = Activation('relu')(x)
        x = conv(x, filters, convsize, Flag_CNN_Model)
        x = bn(x)
        x = Activation('relu')(x)
        x = conv(x, filters * 2, convsize, Flag_CNN_Model, strides=2)
        x = bn(x)
        x = Activation('relu')(x)
        x = Dropout(0.25)(x)
        filters *= 2

    if Flag_CNN_Model == '2d':
        x = GlobalAveragePooling2D(name='avg_pool')(x)
    elif Flag_CNN_Model == '3d':
        x = GlobalAveragePooling3D(name='avg_pool')(x)

    out = Dense(Nlabels, activation='softmax')(x)
    model = Model(inputs=input_tensor, outputs=out)
    if flag_print:
        model.summary()
    return model
def _SplAtConv3d(self, input_tensor, filters=64, kernel_size=3, stride=1,
                 dilation=1, groups=1, radix=0):
    # Split-attention convolution. NOTE: radix must be >= 1 for the grouped
    # convolution below to produce any filters.
    x = input_tensor
    in_channels = input_tensor.shape[-1]

    x = GroupedConv3D(filters=filters * radix,
                      kernel_size=[kernel_size for i in range(groups * radix)],
                      use_keras=True, padding="same",
                      kernel_initializer="he_normal", use_bias=False,
                      data_format="channels_last",
                      dilation_rate=dilation)(x)
    x = BatchNormalization(axis=self.channel_axis, epsilon=1.001e-5)(x)
    x = Activation(self.active)(x)

    if radix > 1:
        splited = tf.split(x, radix, axis=-1)
        gap = sum(splited)
    else:
        gap = x

    gap = GlobalAveragePooling3D(data_format="channels_last")(gap)
    gap = tf.reshape(gap, [-1, 1, 1, 1, filters])  # 3D model: add the last axis

    reduction_factor = 4
    inter_channels = max(in_channels * radix // reduction_factor, 32)
    x = Conv3D(inter_channels, kernel_size=1)(gap)
    x = BatchNormalization(axis=self.channel_axis, epsilon=1.001e-5)(x)
    x = Activation(self.active)(x)
    x = Conv3D(filters * radix, kernel_size=1)(x)

    atten = self._rsoftmax(x, filters, radix, groups)

    if radix > 1:
        logits = tf.split(atten, radix, axis=-1)
        out = sum([a * b for a, b in zip(splited, logits)])
    else:
        out = atten * x
    return out
def Slow_body(x, lateral, layers, block):
    slow_inplanes = 64 + 64 // 8 * 2
    x = Conv_BN_ReLU(64, kernel_size=(1, 7, 7), strides=(1, 2, 2))(x)
    x = MaxPool3D(pool_size=(1, 3, 3), strides=(1, 2, 2), padding='same')(x)

    x = Concatenate()([x, lateral[0]])
    x, slow_inplanes = make_layer_slow(x, block, 64, layers[0], head_conv=1,
                                       slow_inplanes=slow_inplanes)
    x = Concatenate()([x, lateral[1]])
    x, slow_inplanes = make_layer_slow(x, block, 128, layers[1], stride=2,
                                       head_conv=1, slow_inplanes=slow_inplanes)
    x = Concatenate()([x, lateral[2]])
    x, slow_inplanes = make_layer_slow(x, block, 256, layers[2], stride=2,
                                       head_conv=1, slow_inplanes=slow_inplanes)
    x = Concatenate()([x, lateral[3]])
    x, slow_inplanes = make_layer_slow(x, block, 512, layers[3], stride=2,
                                       head_conv=1, slow_inplanes=slow_inplanes)

    x = GlobalAveragePooling3D()(x)
    return x
def __init__(self, modelnet, args):
    # TODO: Define a suitable model, and either `.compile` it, or prepare
    # optimizer and loss manually.
    inp = Input(shape=(modelnet.H, modelnet.W, modelnet.D, modelnet.C))

    hidden = Conv3D(24, (3, 3, 3), activation=None, padding='same')(inp)
    hidden = SpatialDropout3D(0.4)(hidden)
    hidden = BatchNormalization()(hidden)
    hidden = Activation(tf.nn.relu)(hidden)
    hidden = Conv3D(24, (3, 3, 3), activation=None, padding='same')(hidden)
    hidden = SpatialDropout3D(0.4)(hidden)
    hidden = BatchNormalization()(hidden)
    hidden = Activation(tf.nn.relu)(hidden)
    hidden = MaxPooling3D((2, 2, 2))(hidden)

    hidden = Conv3D(48, (3, 3, 3), activation=None)(hidden)
    hidden = BatchNormalization()(hidden)
    hidden = Activation(tf.nn.relu)(hidden)
    hidden = Conv3D(48, (3, 3, 3), activation=None)(hidden)
    hidden = BatchNormalization()(hidden)
    hidden = Activation(tf.nn.relu)(hidden)
    hidden = MaxPooling3D((2, 2, 2))(hidden)

    hidden = Conv3D(96, (3, 3, 3), activation=None)(hidden)
    hidden = BatchNormalization()(hidden)
    hidden = Activation(tf.nn.relu)(hidden)
    hidden = Conv3D(96, (3, 3, 3), activation=None)(hidden)
    hidden = BatchNormalization()(hidden)
    hidden = Activation(tf.nn.relu)(hidden)
    hidden = MaxPooling3D((2, 2, 2))(hidden)

    hidden = GlobalAveragePooling3D()(hidden)
    output = Dense(len(modelnet.LABELS), activation=tf.nn.softmax)(hidden)

    self.model = tf.keras.Model(inputs=inp, outputs=output)
    self.model.compile(
        optimizer=tf.optimizers.Adam(),
        loss=tf.losses.SparseCategoricalCrossentropy(),
        metrics=[tf.metrics.SparseCategoricalAccuracy(name="accuracy")],
    )
    self.tb_callback = tf.keras.callbacks.TensorBoard(args.logdir,
                                                      update_freq=1000,
                                                      profile_batch=1)
    self.tb_callback.on_train_end = lambda *_: None
def CNN_radiologist(input_size1, input_size2, filters=[8, 16, 32, 64]):
    input1 = Input(shape=input_size1, name='input1')
    input2 = Input(shape=input_size2, name='input2')

    input_feature = conv_block(input1, filters[0], name='input_conv')
    conv1_1 = res_bottleneck_layer_SE(input_feature, out_dim=filters[0], name='block0-1', stride=2)
    conv1_2 = res_bottleneck_layer_SE(conv1_1, out_dim=filters[0], name='block0-2', stride=1)
    conv2_1 = res_bottleneck_layer_SE(conv1_2, out_dim=filters[0], name='block1-1', stride=1)
    conv2_2 = res_bottleneck_layer_SE(conv2_1, out_dim=filters[0], name='block1-2', stride=1)
    conv3_1 = res_bottleneck_layer_SE(conv2_2, out_dim=filters[1], name='block2-1', stride=2)
    conv3_2 = res_bottleneck_layer_SE(conv3_1, out_dim=filters[1], name='block2-2', stride=1)
    conv4_1 = res_bottleneck_layer_SE(conv3_2, out_dim=filters[1], name='block3-1', stride=1)
    conv4_2 = res_bottleneck_layer_SE(conv4_1, out_dim=filters[1], name='block3-2', stride=1)

    avg_pool = GlobalAveragePooling3D()(conv4_2)
    avg_pool = Concatenate(axis=-1)([avg_pool, input2])
    fc1 = Dense(32, name='fc1')(avg_pool)
    fc1 = PReLU()(fc1)
    fc1 = Concatenate(axis=-1)([fc1, input2])
    prob = Dense(2, activation='sigmoid', name='prob')(fc1)

    model = Model(inputs=[input1, input2], outputs=[prob])
    return model
def se_block(inputs: tf.Tensor, block_input: tf.Tensor, filters: int,
             se_ratio: int = 16) -> tf.Tensor:
    se = GlobalAveragePooling3D()(inputs)
    se = Dense(filters // se_ratio, activation='relu')(se)
    se = Dense(filters, activation='sigmoid')(se)
    se = Reshape([1, 1, 1, filters])(se)
    x = Multiply()([inputs, se])
    shortcut = Conv3D(filters, (3, 3, 3), use_bias=False,
                      padding='same')(block_input)
    shortcut = InstanceNormalization()(shortcut)
    x = Add()([x, shortcut])
    return x
def fm_model(init_shape, feature_model, mapping_model):
    ix = Input(shape=init_shape)
    iy = Input(shape=init_shape)

    fx = feature_model(ix)
    fy = feature_model(iy)
    m = mapping_model(fx)

    # Scalar mean absolute error between mapped and target features
    err = Subtract()([fy, m])
    err = Lambda(lambda x: K.abs(x))(err)
    err = GlobalAveragePooling3D()(err)
    err = Lambda(lambda x: K.mean(x, axis=1))(err)
    err = Reshape((1,))(err)

    model = Model(inputs=[ix, iy], outputs=err)
    return model
def CNN(input_size, filters=[8, 16, 32, 64]):
    input = Input(shape=input_size, name='input')

    input_feature = conv_block(input, filters[0], name='input_conv')
    conv1_1 = res_bottleneck_layer_SE(input_feature, out_dim=filters[0], name='block0-1', stride=2)
    conv1_2 = res_bottleneck_layer_SE(conv1_1, out_dim=filters[0], name='block0-2', stride=1)
    conv2_1 = res_bottleneck_layer_SE(conv1_2, out_dim=filters[0], name='block1-1', stride=1)
    conv2_2 = res_bottleneck_layer_SE(conv2_1, out_dim=filters[0], name='block1-2', stride=1)
    conv3_1 = res_bottleneck_layer_SE(conv2_2, out_dim=filters[1], name='block2-1', stride=2)
    conv3_2 = res_bottleneck_layer_SE(conv3_1, out_dim=filters[1], name='block2-2', stride=1)
    conv4_1 = res_bottleneck_layer_SE(conv3_2, out_dim=filters[2], name='block3-1', stride=1)
    conv4_2 = res_bottleneck_layer_SE(conv4_1, out_dim=filters[2], name='block3-2', stride=1)

    avg_pool = GlobalAveragePooling3D()(conv4_2)
    fc1 = Dense(32, name='fc1')(avg_pool)
    fc1 = PReLU()(fc1)
    prob = Dense(2, activation='sigmoid', name='prob')(fc1)

    model = Model(inputs=[input], outputs=[prob])
    return model
def test_global_averagepooling1d2d3d():
    in_w = 32
    in_h = 32
    in_z = 32
    kernel = 32

    model = Sequential(GlobalAveragePooling1D(input_shape=(in_w, kernel)))
    flops = get_flops(model, batch_size=1)
    assert flops == in_w * kernel

    model = Sequential(GlobalAveragePooling2D(input_shape=(in_w, in_h, kernel)))
    flops = get_flops(model, batch_size=1)
    assert flops == in_w * in_h * kernel

    model = Sequential(GlobalAveragePooling3D(input_shape=(in_w, in_h, in_z, kernel)))
    flops = get_flops(model, batch_size=1)
    assert flops == in_w * in_h * in_z * kernel
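# Worked check (added note; the one-FLOP-per-pooled-element counting is
# inferred from the asserts above): global average pooling reduces every
# spatial position of every channel once, so the 3D case expects
#     in_w * in_h * in_z * kernel = 32 * 32 * 32 * 32 = 1,048,576 FLOPs.
def _expected_gap_flops(*spatial_dims, channels):
    total = channels
    for d in spatial_dims:
        total *= d
    return total  # e.g. _expected_gap_flops(32, 32, 32, channels=32) == 1048576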
def build_model_cnnedit4(self):
    img_channels = self.img_channels
    img_rows = self.img_rows
    img_cols = self.img_cols
    color_channels = self.color_channels
    img_seq_shape = (img_channels, img_rows, img_cols, color_channels)

    model = Sequential()
    model.add(Input(shape=img_seq_shape, name='img_in'))  # 4*80*80*1
    model.add(Conv3D(16, (3, 5, 5), strides=(1, 2, 2), padding="same",
                     activation='relu'))
    model.add(Conv3D(32, (3, 5, 5), strides=(1, 2, 2), padding="same",
                     activation='relu'))
    model.add(Conv3D(64, (1, 5, 5), strides=(1, 2, 2), padding="same",
                     activation='relu'))
    model.add(Conv3D(128, (1, 3, 3), strides=(1, 2, 2), padding="same",
                     activation='relu'))
    model.add(Conv3D(256, (1, 3, 3), strides=(1, 1, 1), padding="same",
                     activation='relu'))
    model.add(GlobalAveragePooling3D())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(self.action_size, activation='linear',
                    name='model_outputs'))

    adam = Adam(lr=self.learning_rate)
    model.compile(loss="mse", optimizer=adam)
    return model
def __init__(self, layer_name='add_3', weight_decay=0.005):
    super(FTCF_Net, self).__init__()
    self.weight_decay = weight_decay
    self.layer_name = layer_name

    # Feature extractor applied to every frame
    self.basemodel = Xception(weights='imagenet', input_shape=(224, 224, 3),
                              include_top=False)
    self.backbone = tf.keras.Model(
        inputs=self.basemodel.input,
        outputs=self.basemodel.get_layer(self.layer_name).output)
    self.backbone.trainable = False

    self.cbm1 = CBM(filters=364, kernel=(1, 1, 2))

    # Fusion stage 1
    self.ftcf1 = FTCF_Block(32)
    self.avgpooling3d_1 = AveragePooling3D((2, 2, 2), strides=(2, 2, 2),
                                           padding='same')
    # Fusion stage 2
    self.ftcf2 = FTCF_Block(32)
    self.avgpooling3d_2 = AveragePooling3D((2, 2, 2), strides=(2, 2, 2),
                                           padding='same')
    # Fusion stage 3
    self.ftcf3 = FTCF_Block(64)
    self.avgpooling3d_3 = AveragePooling3D((2, 2, 2), strides=(2, 2, 2),
                                           padding='same')

    self.cbm2 = CBM(filters=128, kernel=(2, 2, 2), padding='valid')
    self.globalpooling3d = GlobalAveragePooling3D()
    self.dense_2 = Dense(512, activation='relu',
                         kernel_regularizer=l2(self.weight_decay))
    self.dropout_2 = Dropout(0.5)
    self.output_tensor = Dense(2, activation='softmax',
                               kernel_regularizer=l2(self.weight_decay))