def get_unet():
    """Build and compile a densely-connected 3D U-Net (Keras 1.x API).

    Input is a single-channel 48x48x48 volume; output is a same-resolution
    single-channel sigmoid map. Compiled with Adam and a Dice loss
    (`dice_coef_loss` / `dice_coef` are defined elsewhere in this project).

    Returns:
        The compiled Keras Model.
    """
    inputs = Input(shape=(48, 48, 48, 1))
    x = BatchNormalization()(inputs)
    conv1 = Convolution3D(48, 3, 3, 3, border_mode='same')(x)
    # dense 3 layers 1 (BN -> ReLU -> Conv -> Dropout, repeated)
    conv1 = BatchNormalization()(conv1)
    conv1 = Activation('relu')(conv1)
    conv2 = Convolution3D(96, 3, 3, 3, border_mode='same')(conv1)
    conv2 = Dropout(p=0.2)(conv2)
    conv2 = BatchNormalization()(conv2)
    conv2 = Activation('relu')(conv2)
    conv3 = Convolution3D(96, 3, 3, 3, border_mode='same')(conv2)
    conv3 = Dropout(p=0.2)(conv3)
    conv3 = BatchNormalization()(conv3)
    conv3 = Activation('relu')(conv3)
    conv4 = Convolution3D(96, 3, 3, 3, border_mode='same')(conv3)
    conv4 = Dropout(p=0.2)(conv4)
    # Transition down 1: 1x1x1 conv then 2x pooling
    conv4 = BatchNormalization()(conv4)
    conv4 = Activation('relu')(conv4)
    conv5 = Convolution3D(96, 1, 1, 1, border_mode='same')(conv4)
    conv5 = Dropout(p=0.2)(conv5)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv5)  # 24*24*24
    # dense 3 layer 2
    pool1 = BatchNormalization()(pool1)
    pool1 = Activation('relu')(pool1)
    conv6 = Convolution3D(192, 3, 3, 3, border_mode='same')(pool1)
    conv6 = Dropout(p=0.2)(conv6)
    conv6 = BatchNormalization()(conv6)
    conv6 = Activation('relu')(conv6)
    conv7 = Convolution3D(192, 3, 3, 3, border_mode='same')(conv6)
    conv7 = Dropout(p=0.2)(conv7)
    conv7 = BatchNormalization()(conv7)
    conv7 = Activation('relu')(conv7)
    conv8 = Convolution3D(192, 3, 3, 3, border_mode='same')(conv7)
    conv8 = Dropout(p=0.2)(conv8)
    # Transition down 2
    conv8 = BatchNormalization()(conv8)
    conv8 = Activation('relu')(conv8)
    conv9 = Convolution3D(192, 1, 1, 1, border_mode='same')(conv8)
    conv9 = Dropout(p=0.2)(conv9)
    pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv9)  # 12*12*12
    # 2 layers (bottleneck)
    pool2 = BatchNormalization()(pool2)
    pool2 = Activation('relu')(pool2)
    conv10 = Convolution3D(384, 3, 3, 3, border_mode='same')(pool2)
    conv10 = Dropout(p=0.2)(conv10)
    conv10 = BatchNormalization()(conv10)
    conv10 = Activation('relu')(conv10)
    conv11 = Convolution3D(384, 3, 3, 3, border_mode='same')(conv10)
    conv11 = Dropout(p=0.2)(conv11)
    # Transition Up 1: upsample and concatenate with encoder skip `conv9`
    conv11 = BatchNormalization()(conv11)
    conv11 = Activation('relu')(conv11)
    up1 = UpSampling3D(size=(2, 2, 2), name="up1")(conv11)  # 24*24*24
    up1 = merge([up1, conv9], mode='concat', concat_axis=-1)
    conv11 = Convolution3D(384, 1, 1, 1, border_mode='same')(up1)  # ??
    conv11 = Dropout(p=0.2)(conv11)
    # dense 3 layer 1 (decoder side)
    conv11 = BatchNormalization()(conv11)
    conv11 = Activation('relu')(conv11)
    conv12 = Convolution3D(192, 3, 3, 3, border_mode='same')(conv11)
    conv12 = Dropout(p=0.2)(conv12)
    conv12 = BatchNormalization()(conv12)
    conv12 = Activation('relu')(conv12)
    conv13 = Convolution3D(192, 3, 3, 3, border_mode='same')(conv12)
    conv13 = Dropout(p=0.2)(conv13)
    conv13 = BatchNormalization()(conv13)
    conv13 = Activation('relu')(conv13)
    conv14 = Convolution3D(192, 3, 3, 3, border_mode='same')(conv13)
    conv14 = Dropout(p=0.2)(conv14)
    # Transition Up 2: upsample and concatenate with encoder skip `conv5`
    conv14 = BatchNormalization()(conv14)
    conv14 = Activation('relu')(conv14)
    up2 = UpSampling3D(size=(2, 2, 2), name="up2")(conv14)
    up2 = merge([up2, conv5], mode='concat', concat_axis=-1)
    conv15 = Convolution3D(192, 1, 1, 1, border_mode='same')(up2)  # ??
    conv15 = Dropout(p=0.2)(conv15)
    # dense 3 layer (final decoder stage)
    conv15 = BatchNormalization()(conv15)
    conv15 = Activation('relu')(conv15)
    conv16 = Convolution3D(96, 3, 3, 3, border_mode='same')(conv15)
    conv16 = Dropout(p=0.2)(conv16)
    conv16 = BatchNormalization()(conv16)
    conv16 = Activation('relu')(conv16)
    conv17 = Convolution3D(96, 3, 3, 3, border_mode='same')(conv16)
    conv17 = Dropout(p=0.2)(conv17)
    conv17 = BatchNormalization()(conv17)
    conv17 = Activation('relu')(conv17)
    conv18 = Convolution3D(96, 3, 3, 3, border_mode='same')(conv17)
    conv18 = Dropout(p=0.2)(conv18)
    conv18 = BatchNormalization()(conv18)
    conv18 = Activation('relu')(conv18)
    # 1x1x1 sigmoid head: per-voxel probability map
    conv19 = Convolution3D(1, 1, 1, 1, activation='sigmoid')(conv18)
    model = Model(input=inputs, output=conv19)
    model.compile(optimizer=Adam(lr=1.0e-4, decay=1.0e-6), loss=dice_coef_loss, metrics=[dice_coef])
    model.summary()
    return model
# NOTE(review): the triple-quote below was in the original source; it opens a
# string literal whose closing quote is not visible in this chunk — confirm it
# is balanced elsewhere in the file.
'''
def build_model(self):
    """Build and compile a convolutional VAE over 3-channel image sequences.

    Encoder: stacked 3D convolutions -> dense -> (z_mean, z_log_var);
    decoder: dense -> reshape -> 2D deconvolutions -> sigmoid mean map.
    Stores `self.z_mean` / `self.z_log_var` (read by `self.vae_loss`) and
    returns the compiled model.

    Returns:
        The compiled Keras VAE Model.
    """
    latent_dim = 2
    intermediate_dim = 128
    epsilon_std = 0.01
    nb_conv = 3
    nb_filters = 64
    # assumes channels-first 5D input (3, seq, H, W) — TODO confirm backend ordering
    inputs = Input(shape=(3, self.sequence_size, self.img_size, self.img_size))
    # NOTE(review): `Relu` (capitalized, no quotes) is presumably an activation
    # defined elsewhere in this module — verify it exists; Keras' builtin is 'relu'.
    x = Convolution3D(32, 3, 3, 3, activation=Relu, border_mode="same")(inputs)
    x = Convolution3D(64, 3, 3, 3, activation=Relu, border_mode="same")(x)
    x = MaxPooling3D()(x)
    x = Convolution3D(128, 3, 3, 3, activation=Relu, border_mode="same")(x)
    x = Convolution3D(32, 3, 3, 3, activation=Relu, border_mode="same")(x)
    x = MaxPooling3D()(x)
    x = Convolution3D(16, 3, 3, 3, activation=Relu, border_mode="same")(x)
    x = Convolution3D(8, 3, 3, 3, activation=Relu, border_mode="same")(x)
    flat = Flatten()(x)
    hidden = Dense(intermediate_dim, activation='relu')(flat)
    # Latent distribution parameters, kept on self for the VAE loss term.
    self.z_mean = Dense(latent_dim)(hidden)
    self.z_log_var = Dense(latent_dim)(hidden)

    def sampling(args):
        # Reparameterization trick: z = mu + exp(log_var) * eps
        z_mean, z_log_var = args
        epsilon = K.random_normal(shape=(self.batch_size, latent_dim), mean=0., std=epsilon_std)
        return z_mean + K.exp(z_log_var) * epsilon

    # note that "output_shape" isn't necessary with the TensorFlow backend
    # so you could write `Lambda(sampling)([z_mean, z_log_var])`
    z = Lambda(sampling, output_shape=(latent_dim, ))([self.z_mean, self.z_log_var])
    # we instantiate these layers separately so as to reuse them later
    decoder_hid = Dense(intermediate_dim, activation='relu')
    decoder_upsample = Dense(nb_filters * 14 * 14, activation='relu')
    if K.image_dim_ordering() == 'th':
        output_shape = (self.batch_size, nb_filters, 14, 14)
    else:
        output_shape = (self.batch_size, 14, 14, nb_filters)
    decoder_reshape = Reshape(output_shape[1:])
    decoder_deconv_1 = Deconvolution2D(nb_filters, nb_conv, nb_conv, output_shape,
                                       border_mode='same', subsample=(1, 1), activation='relu')
    decoder_deconv_2 = Deconvolution2D(nb_filters, nb_conv, nb_conv, output_shape,
                                       border_mode='same', subsample=(1, 1), activation='relu')
    if K.image_dim_ordering() == 'th':
        output_shape = (self.batch_size, nb_filters, 29, 29)
    else:
        output_shape = (self.batch_size, 29, 29, nb_filters)
    # Strided deconv upsamples 14x14 -> 29x29; the final valid 2x2 conv squashes to 28x28.
    decoder_deconv_3_upsamp = Deconvolution2D(nb_filters, 3, 3, output_shape,
                                              border_mode='valid', subsample=(2, 2), activation='relu')
    decoder_mean_squash = Convolution2D(1, 2, 2, border_mode='valid', activation='sigmoid')
    hid_decoded = decoder_hid(z)
    up_decoded = decoder_upsample(hid_decoded)
    reshape_decoded = decoder_reshape(up_decoded)
    deconv_1_decoded = decoder_deconv_1(reshape_decoded)
    deconv_2_decoded = decoder_deconv_2(deconv_1_decoded)
    x_decoded_relu = decoder_deconv_3_upsamp(deconv_2_decoded)
    x_decoded_mean_squash = decoder_mean_squash(x_decoded_relu)
    vae = Model(inputs, x_decoded_mean_squash)
    vae.compile(optimizer='rmsprop', loss=self.vae_loss)
    vae.summary()
    return vae
def get_net(input_shape=(CUBE_SIZE, CUBE_SIZE, CUBE_SIZE, 1), load_weight_path=None) -> Model:
    # expected return type is Model
    """Build a C3D-style 3D CNN that regresses a single malignancy score.

    Args:
        input_shape: shape of the input cube (depth, height, width, channels).
        load_weight_path: optional path to a weights file to load before compiling.

    Returns:
        The compiled Keras Model (SGD optimizer, mean-absolute-error loss).
    """
    inputs = Input(shape=input_shape, name="input_1")
    x = inputs
    # Halve the depth axis first; spatial resolution kept.
    x = AveragePooling3D(pool_size=(2, 1, 1), strides=(2, 1, 1), border_mode="same")(x)
    x = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same', name='conv1', subsample=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), border_mode='valid', name='pool1')(x)
    # 2nd layer group
    x = Convolution3D(128, 3, 3, 3, activation='relu', border_mode='same', name='conv2', subsample=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), border_mode='valid', name='pool2')(x)
    if USE_DROPOUT:
        x = Dropout(p=0.3)(x)
    # 3rd layer group
    x = Convolution3D(256, 3, 3, 3, activation='relu', border_mode='same', name='conv3a', subsample=(1, 1, 1))(x)
    x = Convolution3D(256, 3, 3, 3, activation='relu', border_mode='same', name='conv3b', subsample=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), border_mode='valid', name='pool3')(x)
    if USE_DROPOUT:
        x = Dropout(p=0.4)(x)
    # 4th layer group
    x = Convolution3D(512, 3, 3, 3, activation='relu', border_mode='same', name='conv4a', subsample=(1, 1, 1))(x)
    x = Convolution3D( 512, 3, 3, 3, activation='relu', border_mode='same', name='conv4b', subsample=(1, 1, 1), )(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), border_mode='valid', name='pool4')(x)
    if USE_DROPOUT:
        x = Dropout(p=0.5)(x)
    # output 1 (classification head kept disabled below)
    last64 = Convolution3D(64, 2, 2, 2, activation="relu", name="last_64")(x)
    #out_class = Convolution3D(1, 1, 1, 1, activation="sigmoid", name="out_class_last")(last64)
    #out_class = Flatten(name="out_class")(out_class)
    # output 2: linear malignancy regression head
    # NOTE(review): "out_sphericiy" is a typo (sphericity) but it is used
    # consistently here and in compile() below, so renaming would break
    # saved weights/logs — leaving as-is.
    out_malignancy = Convolution3D(1, 1, 1, 1, activation=None, name="out_malignancy_last")(last64)
    out_malignancy = Flatten(name="out_sphericiy")(out_malignancy)
    model = Model(input=inputs, output=out_malignancy)
    if load_weight_path is not None:
        model.load_weights(load_weight_path, by_name=False)
    # compile the model
    model.compile(optimizer=SGD(lr=LEARN_RATE, momentum=0.9, nesterov=True),
                  loss={"out_sphericiy": "mean_absolute_error"},
                  metrics={"out_sphericiy": [mean_absolute_error]})
    model.summary(line_length=140)
    return model
def voxModel(input_tensor=None, input_shape=None, classes=1):
    """Build a VoxResNet-style U-Net for 3D volume segmentation.

    Args:
        input_tensor: optional Keras tensor to use as the model input.
        input_shape: optional input shape tuple; defaults to (224, 224, 96, classes).
        classes: number of output channels of the softmax head.

    Returns:
        The compiled Keras Model (Adam optimizer, Dice loss).
    """
    if K.image_data_format() == 'channels_last':
        bn_axis = 4
    else:
        bn_axis = 1
    # Bug fix: the original bound `img_input` only when input_shape was None,
    # so calling with an explicit input_shape or input_tensor raised NameError.
    # The default call path (both None) is unchanged.
    if input_shape is None:
        input_shape = (224, 224, 96, classes)
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        img_input = input_tensor
    # Stem: pad, strided 7x7x7 conv, BN, ReLU, pool.
    x = ZeroPadding3D((4, 4, 4))(img_input)
    x = Conv3D(64, (7, 7, 7), strides=(2, 2, 2), name='conv1')(x)
    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling3D((3, 3, 3), strides=(2, 2, 2))(x)
    # Encoder stages 2-5; x2/x3/x4 are saved as skip connections.
    x = conv_block(x, 3, [64, 64, 128], stage=2, block='a', strides=(1, 1, 1))
    x = identity_block(x, 3, [64, 64, 128], stage=2, block='b')
    x2 = identity_block(x, 3, [64, 64, 128], stage=2, block='c')
    x = conv_block(x2, 3, [128, 128, 256], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 256], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 256], stage=3, block='c')
    x3 = identity_block(x, 3, [128, 128, 256], stage=3, block='d')
    x = conv_block(x3, 3, [256, 256, 512], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 512], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 512], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 512], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 512], stage=4, block='e')
    x4 = identity_block(x, 3, [256, 256, 512], stage=4, block='f')
    x = conv_block(x4, 3, [512, 512, 1024], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 1024], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 1024], stage=5, block='c')
    # Decoder stages 6-10 with skip concatenations.
    x = up_conv_block(x, 3, [1024, 512, 512], stage=6, block='a')
    x = identity_block(x, 3, [1024, 512, 512], stage=6, block='b')
    x = identity_block(x, 3, [1024, 512, 512], stage=6, block='c')
    x = concatenate([x, x4], axis=bn_axis)
    x = up_conv_block(x, 3, [1024, 256, 256], stage=7, block='a')
    x = identity_block(x, 3, [1024, 256, 256], stage=7, block='b')
    x = identity_block(x, 3, [1024, 256, 256], stage=7, block='c')
    x = identity_block(x, 3, [1024, 256, 256], stage=7, block='d')
    x = identity_block(x, 3, [1024, 256, 256], stage=7, block='e')
    x = identity_block(x, 3, [1024, 256, 256], stage=7, block='f')
    x = concatenate([x, x3], axis=bn_axis)
    x = up_conv_block(x, 3, [512, 128, 128], stage=8, block='a')
    x = identity_block(x, 3, [512, 128, 128], stage=8, block='b')
    x = identity_block(x, 3, [512, 128, 128], stage=8, block='c')
    x = identity_block(x, 3, [512, 128, 128], stage=8, block='d')
    x = concatenate([x, x2], axis=bn_axis)
    x = up_conv_block(x, 3, [256, 64, 64], stage=10, block='a', strides=(1, 1, 1))
    x = identity_block(x, 3, [256, 64, 64], stage=10, block='b')
    x = identity_block(x, 3, [256, 64, 64], stage=10, block='c')
    # Final upsample back to input resolution and per-voxel softmax.
    x = UpSampling3D(size=(2, 2, 2))(x)
    x = Conv3D(classes, (3, 3, 3), padding='same', activation='softmax', name='convLast')(x)
    model = Model(img_input, x, name='voxResnetUnet')
    model.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss, metrics=[dice_coef])
    return model
# Top-level training-script fragment: one-hot encode the test labels and
# assemble a small sequential 3D CNN (Keras 1.x API). Relies on variables
# defined earlier in the script (Y_train, nb_classes, nb_filters_*,
# kernel_size_*, pool_size_*, input_shape).
Y_test = np_utils.to_categorical(Y_test, nb_classes)
print('y_train shape:', Y_train.shape)
print('y_test shape:', Y_test.shape)
model = Sequential()
# Block 1: conv -> relu -> pool
conv1 = Convolution3D(nb_filters_1, kernel_size_1[0], kernel_size_1[1], kernel_size_1[2],
                      border_mode='same', input_shape=input_shape)
model.add(conv1)
model.add(Activation('relu'))
pool1 = MaxPooling3D(pool_size=pool_size_1)
model.add(pool1)
# Block 2: conv -> relu -> pool
conv2 = Convolution3D(nb_filters_2, kernel_size_2[0], kernel_size_2[1], kernel_size_2[2],
                      border_mode='same')
model.add(conv2)
model.add(Activation('relu'))
pool2 = MaxPooling3D(pool_size=pool_size_2)
model.add(pool2)
# Swap the first two non-batch axes before whatever head follows.
model.add(Permute((2, 1, 3, 4)))
# Debug: shape after the Permute layer (7th layer, index 6).
print(model.layers[6].output_shape)
def get_3d_unet_bn():
    """Build a 3D U-Net with batch normalization after every convolution.

    Input volume shape comes from module-level config `cm`
    (slices_3d x img_rows_3d x img_cols_3d, single channel); output is a
    3-channel per-voxel map.

    Returns:
        The compiled Keras Model (Adam, categorical crossentropy).
    """
    inputs = Input((cm.slices_3d, cm.img_rows_3d, cm.img_cols_3d, 1))
    # Encoder level 1 (32 filters)
    conv1 = Conv3D(filters=32, kernel_size=(3, 3, 3), strides=(1, 1, 1), border_mode='same')(inputs)
    bn1 = BatchNormalization(axis=-1)(conv1)
    act1 = Activation('relu')(bn1)
    conv1 = Conv3D(filters=32, kernel_size=(3, 3, 3), strides=(1, 1, 1), border_mode='same')(act1)
    bn1 = BatchNormalization(axis=-1)(conv1)
    act1 = Activation('relu')(bn1)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2))(act1)
    # Encoder level 2 (64 filters)
    conv2 = Conv3D(filters=64, kernel_size=(3, 3, 3), strides=(1, 1, 1), border_mode='same')(pool1)
    bn2 = BatchNormalization(axis=-1)(conv2)
    act2 = Activation('relu')(bn2)
    conv2 = Conv3D(filters=64, kernel_size=(3, 3, 3), strides=(1, 1, 1), border_mode='same')(act2)
    bn2 = BatchNormalization(axis=-1)(conv2)
    act2 = Activation('relu')(bn2)
    pool2 = MaxPooling3D(pool_size=(2, 2, 2))(act2)
    # Encoder level 3 (128 filters)
    conv3 = Conv3D(filters=128, kernel_size=(3, 3, 3), strides=(1, 1, 1), border_mode='same')(pool2)
    bn3 = BatchNormalization(axis=-1)(conv3)
    act3 = Activation('relu')(bn3)
    conv3 = Conv3D(filters=128, kernel_size=(3, 3, 3), strides=(1, 1, 1), border_mode='same')(act3)
    bn3 = BatchNormalization(axis=-1)(conv3)
    act3 = Activation('relu')(bn3)
    pool3 = MaxPooling3D(pool_size=(2, 2, 2))(act3)
    # Encoder level 4 (256 filters)
    conv4 = Conv3D(filters=256, kernel_size=(3, 3, 3), strides=(1, 1, 1), border_mode='same')(pool3)
    bn4 = BatchNormalization(axis=-1)(conv4)
    act4 = Activation('relu')(bn4)
    conv4 = Conv3D(filters=256, kernel_size=(3, 3, 3), strides=(1, 1, 1), border_mode='same')(act4)
    bn4 = BatchNormalization(axis=-1)(conv4)
    act4 = Activation('relu')(bn4)
    pool4 = MaxPooling3D(pool_size=(2, 2, 2))(act4)
    # Bottleneck (512 filters)
    conv5 = Conv3D(filters=512, kernel_size=(3, 3, 3), strides=(1, 1, 1), border_mode='same')(pool4)
    bn5 = BatchNormalization(axis=-1)(conv5)
    act5 = Activation('relu')(bn5)
    conv5 = Conv3D(filters=512, kernel_size=(3, 3, 3), strides=(1, 1, 1), border_mode='same')(act5)
    bn5 = BatchNormalization(axis=-1)(conv5)
    act5 = Activation('relu')(bn5)
    # Decoder: upsample + concatenate encoder skips, mirroring the filters.
    up6 = merge([UpSampling3D(size=(2, 2, 2))(act5), act4], mode='concat', concat_axis=-1)
    conv6 = Conv3D(filters=256, kernel_size=(3, 3, 3), strides=(1, 1, 1), border_mode='same')(up6)
    bn6 = BatchNormalization(axis=-1)(conv6)
    act6 = Activation('relu')(bn6)
    conv6 = Conv3D(filters=256, kernel_size=(3, 3, 3), strides=(1, 1, 1), border_mode='same')(act6)
    bn6 = BatchNormalization(axis=-1)(conv6)
    act6 = Activation('relu')(bn6)
    up7 = merge([UpSampling3D(size=(2, 2, 2))(act6), act3], mode='concat', concat_axis=-1)
    conv7 = Conv3D(filters=128, kernel_size=(3, 3, 3), strides=(1, 1, 1), border_mode='same')(up7)
    bn7 = BatchNormalization(axis=-1)(conv7)
    act7 = Activation('relu')(bn7)
    conv7 = Conv3D(filters=128, kernel_size=(3, 3, 3), strides=(1, 1, 1), border_mode='same')(act7)
    bn7 = BatchNormalization(axis=-1)(conv7)
    act7 = Activation('relu')(bn7)
    up8 = merge([UpSampling3D(size=(2, 2, 2))(act7), act2], mode='concat', concat_axis=-1)
    conv8 = Conv3D(filters=64, kernel_size=(3, 3, 3), strides=(1, 1, 1), border_mode='same')(up8)
    bn8 = BatchNormalization(axis=-1)(conv8)
    act8 = Activation('relu')(bn8)
    conv8 = Conv3D(filters=64, kernel_size=(3, 3, 3), strides=(1, 1, 1), border_mode='same')(act8)
    bn8 = BatchNormalization(axis=-1)(conv8)
    act8 = Activation('relu')(bn8)
    up9 = merge([UpSampling3D(size=(2, 2, 2))(act8), act1], mode='concat', concat_axis=-1)
    conv9 = Conv3D(filters=32, kernel_size=(3, 3, 3), strides=(1, 1, 1), border_mode='same')(up9)
    bn9 = BatchNormalization(axis=-1)(conv9)
    act9 = Activation('relu')(bn9)
    conv9 = Conv3D(filters=32, kernel_size=(3, 3, 3), strides=(1, 1, 1), border_mode='same')(act9)
    bn9 = BatchNormalization(axis=-1)(conv9)
    act9 = Activation('relu')(bn9)
    # NOTE(review): a sigmoid 3-channel head with categorical_crossentropy is
    # unusual — channel probabilities will not sum to 1; 'softmax' is the
    # conventional pairing. Confirm intent before changing (affects training).
    conv10 = Conv3D(filters=3, kernel_size=(1, 1, 1), strides=(1, 1, 1), activation='sigmoid')(act9)
    model = Model(input=inputs, output=conv10)
    model.compile(optimizer=Adam(lr=1.0e-5), loss="categorical_crossentropy", metrics=["categorical_accuracy"])
    return model
def Inception_Inflated3d(include_top=True, weights=None, input_tensor=None, input_shape=None,
                         dropout_prob=0.0, endpoint_logit=True, classes=400):
    """Instantiates the Inflated 3D Inception v1 architecture.

    Optionally loads weights pre-trained on Kinetics. Note that when using
    TensorFlow, for best performance you should set
    `image_data_format='channels_last'` in your Keras config at
    ~/.keras/keras.json. The model and the weights are compatible with both
    TensorFlow and Theano. The data format convention used by the model is
    the one specified in your Keras config file. Note that the default input
    frame(image) size for this model is 224x224.

    # Arguments
        include_top: whether to include the the classification layer at the
            top of the network.
        weights: one of `None` (random initialization) or 'kinetics_only'
            (pre-training on Kinetics dataset only) or
            'imagenet_and_kinetics' (pre-training on ImageNet and Kinetics
            datasets).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified if
            `include_top` is False (otherwise the input shape has to be
            `(NUM_FRAMES, 224, 224, 3)` (with `channels_last` data format) or
            `(NUM_FRAMES, 3, 224, 224)` (with `channels_first` data format).
            It should have exactly 3 inputs channels. NUM_FRAMES should be no
            smaller than 8. The authors used 64 frames per example for
            training and testing on kinetics dataset. Also, Width and height
            should be no smaller than 32. E.g. `(64, 150, 150, 3)` would be
            one valid value.
        dropout_prob: optional, dropout probability applied in dropout layer
            after global average pooling layer. 0.0 means no dropout is
            applied, 1.0 means dropout is applied to all features. Note:
            Since Dropout is applied just before the classification layer,
            it is only useful when `include_top` is set to True.
        endpoint_logit: (boolean) optional. If True, the model's forward pass
            will end at producing logits. Otherwise, softmax is applied after
            producing the logits to produce the class probabilities
            prediction. Setting this parameter to True is particularly useful
            when you want to combine results of rgb model and optical flow
            model. - `True` end model forward pass at logit output - `False`
            go further after logit to produce softmax predictions. Note: This
            parameter is only useful when `include_top` is set to True.
        classes: optional number of classes to classify images into, only to
            be specified if `include_top` is True, and if no `weights`
            argument is specified.

    # Returns
        A Keras model instance.

    # Raises
        ValueError: in case of invalid argument for `weights`, or invalid
            input shape.
    """
    if not (weights in WEIGHTS_NAME or weights is None or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or %s' % str(WEIGHTS_NAME) + ' '
                         'or a valid path to a file containing `weights` values')
    if weights in WEIGHTS_NAME and include_top and classes != 400:
        raise ValueError('If using `weights` as one of these %s, with `include_top`'
                         ' as true, `classes` should be 400' % str(WEIGHTS_NAME))
    # Determine proper input shape
    input_shape = _obtain_input_shape(
        input_shape,
        default_frame_size=224,
        min_frame_size=32,
        default_num_frames=64,
        min_num_frames=8,
        data_format=K.image_data_format(),
        require_flatten=include_top,
        weights=weights)
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = 4
    # Downsampling via convolution (spatial and temporal)
    x = conv3d_bn(img_input, 64, 7, 7, 7, strides=(2, 2, 2), padding='same', name='Conv3d_1a_7x7')
    # Downsampling (spatial only)
    x = MaxPooling3D((1, 3, 3), strides=(1, 2, 2), padding='same', name='MaxPool2d_2a_3x3')(x)
    x = conv3d_bn(x, 64, 1, 1, 1, strides=(1, 1, 1), padding='same', name='Conv3d_2b_1x1')
    x = conv3d_bn(x, 192, 3, 3, 3, strides=(1, 1, 1), padding='same', name='Conv3d_2c_3x3')
    # Downsampling (spatial only)
    x = MaxPooling3D((1, 3, 3), strides=(1, 2, 2), padding='same', name='MaxPool2d_3a_3x3')(x)
    # Mixed 3b — each Inception block: 1x1 / 1x1->3x3 / 1x1->3x3 / pool->1x1 branches
    branch_0 = conv3d_bn(x, 64, 1, 1, 1, padding='same', name='Conv3d_3b_0a_1x1')
    branch_1 = conv3d_bn(x, 96, 1, 1, 1, padding='same', name='Conv3d_3b_1a_1x1')
    branch_1 = conv3d_bn(branch_1, 128, 3, 3, 3, padding='same', name='Conv3d_3b_1b_3x3')
    branch_2 = conv3d_bn(x, 16, 1, 1, 1, padding='same', name='Conv3d_3b_2a_1x1')
    branch_2 = conv3d_bn(branch_2, 32, 3, 3, 3, padding='same', name='Conv3d_3b_2b_3x3')
    branch_3 = MaxPooling3D((3, 3, 3), strides=(1, 1, 1), padding='same', name='MaxPool2d_3b_3a_3x3')(x)
    branch_3 = conv3d_bn(branch_3, 32, 1, 1, 1, padding='same', name='Conv3d_3b_3b_1x1')
    x = layers.concatenate(
        [branch_0, branch_1, branch_2, branch_3],
        axis=channel_axis,
        name='Mixed_3b')
    # Mixed 3c
    branch_0 = conv3d_bn(x, 128, 1, 1, 1, padding='same', name='Conv3d_3c_0a_1x1')
    branch_1 = conv3d_bn(x, 128, 1, 1, 1, padding='same', name='Conv3d_3c_1a_1x1')
    branch_1 = conv3d_bn(branch_1, 192, 3, 3, 3, padding='same', name='Conv3d_3c_1b_3x3')
    branch_2 = conv3d_bn(x, 32, 1, 1, 1, padding='same', name='Conv3d_3c_2a_1x1')
    branch_2 = conv3d_bn(branch_2, 96, 3, 3, 3, padding='same', name='Conv3d_3c_2b_3x3')
    branch_3 = MaxPooling3D((3, 3, 3), strides=(1, 1, 1), padding='same', name='MaxPool2d_3c_3a_3x3')(x)
    branch_3 = conv3d_bn(branch_3, 64, 1, 1, 1, padding='same', name='Conv3d_3c_3b_1x1')
    x = layers.concatenate(
        [branch_0, branch_1, branch_2, branch_3],
        axis=channel_axis,
        name='Mixed_3c')
    # Downsampling (spatial and temporal)
    x = MaxPooling3D((3, 3, 3), strides=(2, 2, 2), padding='same', name='MaxPool2d_4a_3x3')(x)
    # Mixed 4b
    branch_0 = conv3d_bn(x, 192, 1, 1, 1, padding='same', name='Conv3d_4b_0a_1x1')
    branch_1 = conv3d_bn(x, 96, 1, 1, 1, padding='same', name='Conv3d_4b_1a_1x1')
    branch_1 = conv3d_bn(branch_1, 208, 3, 3, 3, padding='same', name='Conv3d_4b_1b_3x3')
    branch_2 = conv3d_bn(x, 16, 1, 1, 1, padding='same', name='Conv3d_4b_2a_1x1')
    branch_2 = conv3d_bn(branch_2, 48, 3, 3, 3, padding='same', name='Conv3d_4b_2b_3x3')
    branch_3 = MaxPooling3D((3, 3, 3), strides=(1, 1, 1), padding='same', name='MaxPool2d_4b_3a_3x3')(x)
    branch_3 = conv3d_bn(branch_3, 64, 1, 1, 1, padding='same', name='Conv3d_4b_3b_1x1')
    x = layers.concatenate(
        [branch_0, branch_1, branch_2, branch_3],
        axis=channel_axis,
        name='Mixed_4b')
    # Mixed 4c
    branch_0 = conv3d_bn(x, 160, 1, 1, 1, padding='same', name='Conv3d_4c_0a_1x1')
    branch_1 = conv3d_bn(x, 112, 1, 1, 1, padding='same', name='Conv3d_4c_1a_1x1')
    branch_1 = conv3d_bn(branch_1, 224, 3, 3, 3, padding='same', name='Conv3d_4c_1b_3x3')
    branch_2 = conv3d_bn(x, 24, 1, 1, 1, padding='same', name='Conv3d_4c_2a_1x1')
    branch_2 = conv3d_bn(branch_2, 64, 3, 3, 3, padding='same', name='Conv3d_4c_2b_3x3')
    branch_3 = MaxPooling3D((3, 3, 3), strides=(1, 1, 1), padding='same', name='MaxPool2d_4c_3a_3x3')(x)
    branch_3 = conv3d_bn(branch_3, 64, 1, 1, 1, padding='same', name='Conv3d_4c_3b_1x1')
    x = layers.concatenate(
        [branch_0, branch_1, branch_2, branch_3],
        axis=channel_axis,
        name='Mixed_4c')
    # Mixed 4d
    branch_0 = conv3d_bn(x, 128, 1, 1, 1, padding='same', name='Conv3d_4d_0a_1x1')
    branch_1 = conv3d_bn(x, 128, 1, 1, 1, padding='same', name='Conv3d_4d_1a_1x1')
    branch_1 = conv3d_bn(branch_1, 256, 3, 3, 3, padding='same', name='Conv3d_4d_1b_3x3')
    branch_2 = conv3d_bn(x, 24, 1, 1, 1, padding='same', name='Conv3d_4d_2a_1x1')
    branch_2 = conv3d_bn(branch_2, 64, 3, 3, 3, padding='same', name='Conv3d_4d_2b_3x3')
    branch_3 = MaxPooling3D((3, 3, 3), strides=(1, 1, 1), padding='same', name='MaxPool2d_4d_3a_3x3')(x)
    branch_3 = conv3d_bn(branch_3, 64, 1, 1, 1, padding='same', name='Conv3d_4d_3b_1x1')
    x = layers.concatenate(
        [branch_0, branch_1, branch_2, branch_3],
        axis=channel_axis,
        name='Mixed_4d')
    # Mixed 4e
    branch_0 = conv3d_bn(x, 112, 1, 1, 1, padding='same', name='Conv3d_4e_0a_1x1')
    branch_1 = conv3d_bn(x, 144, 1, 1, 1, padding='same', name='Conv3d_4e_1a_1x1')
    branch_1 = conv3d_bn(branch_1, 288, 3, 3, 3, padding='same', name='Conv3d_4e_1b_3x3')
    branch_2 = conv3d_bn(x, 32, 1, 1, 1, padding='same', name='Conv3d_4e_2a_1x1')
    branch_2 = conv3d_bn(branch_2, 64, 3, 3, 3, padding='same', name='Conv3d_4e_2b_3x3')
    branch_3 = MaxPooling3D((3, 3, 3), strides=(1, 1, 1), padding='same', name='MaxPool2d_4e_3a_3x3')(x)
    branch_3 = conv3d_bn(branch_3, 64, 1, 1, 1, padding='same', name='Conv3d_4e_3b_1x1')
    x = layers.concatenate(
        [branch_0, branch_1, branch_2, branch_3],
        axis=channel_axis,
        name='Mixed_4e')
    # Mixed 4f
    branch_0 = conv3d_bn(x, 256, 1, 1, 1, padding='same', name='Conv3d_4f_0a_1x1')
    branch_1 = conv3d_bn(x, 160, 1, 1, 1, padding='same', name='Conv3d_4f_1a_1x1')
    branch_1 = conv3d_bn(branch_1, 320, 3, 3, 3, padding='same', name='Conv3d_4f_1b_3x3')
    branch_2 = conv3d_bn(x, 32, 1, 1, 1, padding='same', name='Conv3d_4f_2a_1x1')
    branch_2 = conv3d_bn(branch_2, 128, 3, 3, 3, padding='same', name='Conv3d_4f_2b_3x3')
    branch_3 = MaxPooling3D((3, 3, 3), strides=(1, 1, 1), padding='same', name='MaxPool2d_4f_3a_3x3')(x)
    branch_3 = conv3d_bn(branch_3, 128, 1, 1, 1, padding='same', name='Conv3d_4f_3b_1x1')
    x = layers.concatenate(
        [branch_0, branch_1, branch_2, branch_3],
        axis=channel_axis,
        name='Mixed_4f')
    # Downsampling (spatial and temporal)
    x = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), padding='same', name='MaxPool2d_5a_2x2')(x)
    # Mixed 5b
    branch_0 = conv3d_bn(x, 256, 1, 1, 1, padding='same', name='Conv3d_5b_0a_1x1')
    branch_1 = conv3d_bn(x, 160, 1, 1, 1, padding='same', name='Conv3d_5b_1a_1x1')
    branch_1 = conv3d_bn(branch_1, 320, 3, 3, 3, padding='same', name='Conv3d_5b_1b_3x3')
    branch_2 = conv3d_bn(x, 32, 1, 1, 1, padding='same', name='Conv3d_5b_2a_1x1')
    branch_2 = conv3d_bn(branch_2, 128, 3, 3, 3, padding='same', name='Conv3d_5b_2b_3x3')
    branch_3 = MaxPooling3D((3, 3, 3), strides=(1, 1, 1), padding='same', name='MaxPool2d_5b_3a_3x3')(x)
    branch_3 = conv3d_bn(branch_3, 128, 1, 1, 1, padding='same', name='Conv3d_5b_3b_1x1')
    x = layers.concatenate(
        [branch_0, branch_1, branch_2, branch_3],
        axis=channel_axis,
        name='Mixed_5b')
    # Mixed 5c
    branch_0 = conv3d_bn(x, 384, 1, 1, 1, padding='same', name='Conv3d_5c_0a_1x1')
    branch_1 = conv3d_bn(x, 192, 1, 1, 1, padding='same', name='Conv3d_5c_1a_1x1')
    branch_1 = conv3d_bn(branch_1, 384, 3, 3, 3, padding='same', name='Conv3d_5c_1b_3x3')
    branch_2 = conv3d_bn(x, 48, 1, 1, 1, padding='same', name='Conv3d_5c_2a_1x1')
    branch_2 = conv3d_bn(branch_2, 128, 3, 3, 3, padding='same', name='Conv3d_5c_2b_3x3')
    branch_3 = MaxPooling3D((3, 3, 3), strides=(1, 1, 1), padding='same', name='MaxPool2d_5c_3a_3x3')(x)
    branch_3 = conv3d_bn(branch_3, 128, 1, 1, 1, padding='same', name='Conv3d_5c_3b_1x1')
    x = layers.concatenate(
        [branch_0, branch_1, branch_2, branch_3],
        axis=channel_axis,
        name='Mixed_5c')
    if include_top:
        # Classification block
        x = AveragePooling3D((2, 7, 7), strides=(1, 1, 1), padding='valid', name='global_avg_pool')(x)
        x = Dropout(dropout_prob)(x)
        x = conv3d_bn(x, classes, 1, 1, 1, padding='same', use_bias=True,
                      use_activation_fn=False, use_bn=False, name='Conv3d_6a_1x1')
        num_frames_remaining = int(x.shape[1])
        x = Reshape((num_frames_remaining, classes))(x)
        # logits (raw scores for each class) — average over remaining frames
        x = Lambda(lambda x: K.mean(x, axis=1, keepdims=False),
                   output_shape=lambda s: (s[0], s[2]))(x)
        if not endpoint_logit:
            x = Activation('softmax', name='prediction')(x)
    else:
        # No top: global average pool over the remaining spatial extent.
        h = int(x.shape[2])
        w = int(x.shape[3])
        x = AveragePooling3D((2, h, w), strides=(1, 1, 1), padding='valid', name='global_avg_pool')(x)
    inputs = img_input
    # create model
    model = Model(inputs, x, name='i3d_inception')
    # load weights
    if weights in WEIGHTS_NAME:
        if weights == WEIGHTS_NAME[0]:   # rgb_kinetics_only
            if include_top:
                weights_url = WEIGHTS_PATH['rgb_kinetics_only']
                model_name = 'i3d_inception_rgb_kinetics_only.h5'
            else:
                weights_url = WEIGHTS_PATH_NO_TOP['rgb_kinetics_only']
                model_name = 'i3d_inception_rgb_kinetics_only_no_top.h5'
        elif weights == WEIGHTS_NAME[1]: # flow_kinetics_only
            if include_top:
                weights_url = WEIGHTS_PATH['flow_kinetics_only']
                model_name = 'i3d_inception_flow_kinetics_only.h5'
            else:
                weights_url = WEIGHTS_PATH_NO_TOP['flow_kinetics_only']
                model_name = 'i3d_inception_flow_kinetics_only_no_top.h5'
        elif weights == WEIGHTS_NAME[2]: # rgb_imagenet_and_kinetics
            if include_top:
                weights_url = WEIGHTS_PATH['rgb_imagenet_and_kinetics']
                model_name = 'i3d_inception_rgb_imagenet_and_kinetics.h5'
            else:
                weights_url = WEIGHTS_PATH_NO_TOP['rgb_imagenet_and_kinetics']
                model_name = 'i3d_inception_rgb_imagenet_and_kinetics_no_top.h5'
        elif weights == WEIGHTS_NAME[3]: # flow_imagenet_and_kinetics
            if include_top:
                weights_url = WEIGHTS_PATH['flow_imagenet_and_kinetics']
                model_name = 'i3d_inception_flow_imagenet_and_kinetics.h5'
            else:
                weights_url = WEIGHTS_PATH_NO_TOP['flow_imagenet_and_kinetics']
                model_name = 'i3d_inception_flow_imagenet_and_kinetics_no_top.h5'
        downloaded_weights_path = get_file(model_name, weights_url, cache_subdir='models')
        model.load_weights(downloaded_weights_path)
        if K.backend() == 'theano':
            layer_utils.convert_all_kernels_in_model(model)
        if K.image_data_format() == 'channels_first' and K.backend() == 'tensorflow':
            warnings.warn('You are using the TensorFlow backend, yet you '
                          'are using the Theano '
                          'image data format convention '
                          '(`image_data_format="channels_first"`). '
                          'For best performance, set '
                          '`image_data_format="channels_last"` in '
                          'your keras config '
                          'at ~/.keras/keras.json.')
    elif weights is not None:
        model.load_weights(weights)
    return model
def build_model_2lyr(self, use_bn=False):
    """Build a 2-layer version of Cicek et al.'s 3D U-Net.

    One contracting level, a "bottom" level, and one expansive level with a
    skip concatenation, ending in a 1x1x1 softmax head over
    `self._num_classes`. Sets `self._title` and `self._model` as side
    effects and prints intermediate tensor shapes for debugging.

    Args:
        use_bn: if True, insert BatchNormalization after each conv.

    Returns:
        The (uncompiled) Keras Model, also stored on `self._model`.
    """
    self._title = "UNet3D_brain_2layer"
    self._title += "_BN" if use_bn else ""
    input_shape = (self._img_height, self._img_width, self._num_slices, self._img_channels)
    inputs = Input(input_shape)
    # Contracting path
    # Layer 1
    conv_contr1 = Conv3D(filters=32, kernel_size=(3, 3, 3), activation="relu",
                         padding="same", name="contr1_1")(inputs)
    print("conv_contr1 conv1: ", conv_contr1.shape)
    conv_contr1 = BatchNormalization(
        axis=-1)(conv_contr1) if use_bn else conv_contr1
    conv_contr1 = Conv3D(filters=64, kernel_size=(3, 3, 3), activation="relu",
                         padding="same", name="contr1_2")(conv_contr1)
    print("conv_contr1 conv2: ", conv_contr1.shape)
    conv_contr1 = BatchNormalization(
        axis=-1)(conv_contr1) if use_bn else conv_contr1
    pool_contr1 = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                               name="contr1_mp")(conv_contr1)
    print("pool_contr1: ", pool_contr1.shape)
    # "Bottom" layer 2
    conv_bottom = Conv3D(filters=64, kernel_size=(3, 3, 3), activation="relu",
                         padding="same", name="bottom1")(pool_contr1)
    print("conv_bottom conv1: ", conv_bottom.shape)
    conv_bottom = BatchNormalization(
        axis=-1)(conv_bottom) if use_bn else conv_bottom
    conv_bottom = Conv3D(filters=128, kernel_size=(3, 3, 3), activation="relu",
                         padding="same", name="bottom2")(conv_bottom)
    print("conv_bottom conv2: ", conv_bottom.shape)
    conv_bottom = BatchNormalization(
        axis=-1)(conv_bottom) if use_bn else conv_bottom
    # With 'same' padding no cropping is needed; the skip is used directly.
    crop_up1 = conv_contr1
    # Expansive path:
    scale_up1 = UpSampling3D(size=(2, 2, 2))(conv_bottom)
    print("scale_up1: ", scale_up1.shape)
    print("crop_up1: ", crop_up1.shape)
    merge_up1 = concatenate([scale_up1, crop_up1], axis=4)
    print("merge_up1: ", merge_up1.shape)
    conv_up1 = Conv3D(filters=64, kernel_size=(3, 3, 3), activation="relu",
                      padding="same")(merge_up1)
    print("conv_up1 conv1: ", conv_up1.shape)
    conv_up1 = BatchNormalization(
        axis=-1)(conv_up1) if use_bn else conv_up1
    conv_up1 = Conv3D(filters=64, kernel_size=(3, 3, 3), activation="relu",
                      padding="same")(conv_up1)
    print("conv_up1 conv1: ", conv_up1.shape)
    conv_up1 = BatchNormalization(
        axis=-1)(conv_up1) if use_bn else conv_up1
    # Final 1x1 conv layer
    conv_final = Conv3D(filters=self._num_classes, kernel_size=(1, 1, 1))(conv_up1)
    print("conv_final conv1: ", conv_final.shape)
    conv_final = Activation("softmax")(conv_final)
    print("conv_final act: ", conv_final.shape)
    self._model = Model(inputs=inputs, outputs=conv_final)
    return self._model
def getModel(stddev):
    """Build a channels-first 3D encoder/decoder segmentation network.

    All convolutions use 'valid' padding, so spatial size shrinks at
    every conv; the encoder skip tensor therefore has to be
    centre-cropped before concatenation with the upsampled decoder
    tensor.

    :param stddev: unused here — TODO(review): confirm intent (leftover
        from a GaussianNoise variant?)
    :return: model compiled with ``Adam(lr=LR)`` and the ``diceCoef``
        loss (both defined at module level)
    """
    # Single-channel 64^3 volume, channels first.
    inputdata = Input((1, 64, 64, 64))

    # NOTE(review): each conv already applies 'relu', so the following
    # PReLU stacks a second activation on top — kept as in the original.
    conv1 = Convolution3D(128, kernel_size=3, data_format='channels_first',
                          activation='relu', padding='valid', use_bias=True,
                          kernel_initializer='glorot_normal')(inputdata)
    conv1 = PReLU()(conv1)
    conv2 = Convolution3D(128, kernel_size=3, data_format='channels_first',
                          activation='relu', padding='valid', use_bias=True,
                          kernel_initializer='glorot_normal')(conv1)
    conv2 = PReLU()(conv2)
    conv3 = Convolution3D(128, kernel_size=3, data_format='channels_first',
                          activation='relu', padding='valid', use_bias=True,
                          kernel_initializer='glorot_normal')(conv2)
    conv3 = PReLU()(conv3)
    batchnorm1 = BatchNormalization()(conv3)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2),
                         data_format='channels_first')(batchnorm1)

    # Encoder (lower resolution).
    encode1 = Convolution3D(128, kernel_size=3, data_format='channels_first',
                            activation='relu', padding='valid', use_bias=True,
                            kernel_initializer='glorot_normal')(pool1)
    encode1 = PReLU()(encode1)
    encode2 = Convolution3D(128, kernel_size=3, data_format='channels_first',
                            activation='relu', padding='valid', use_bias=True,
                            kernel_initializer='glorot_normal')(encode1)
    encode2 = PReLU()(encode2)
    encode3 = Convolution3D(128, kernel_size=3, data_format='channels_first',
                            activation='relu', padding='valid', use_bias=True,
                            kernel_initializer='glorot_normal')(encode2)
    encode3 = PReLU()(encode3)
    encode4 = Convolution3D(128, kernel_size=3, data_format='channels_first',
                            activation='relu', padding='valid', use_bias=True,
                            kernel_initializer='glorot_normal')(encode3)
    encode4 = PReLU()(encode4)
    batchnorm2 = BatchNormalization()(encode4)
    upsampling1 = UpSampling3D(size=(2, 2, 2),
                               data_format='channels_first')(batchnorm2)

    # Centre-crop the encoder feature map (conv3) so it matches the
    # upsampled decoder feature map — 'valid' padding shrank both.
    finalShape = upsampling1.shape
    originalShape = conv3.shape
    cropShape = (int(originalShape[2] / 2 - finalShape[2] / 2),
                 int(originalShape[3] / 2 - finalShape[3] / 2),
                 int(originalShape[4] / 2 - finalShape[4] / 2))
    crop1 = Cropping3D(cropping=cropShape,
                       data_format='channels_first')(conv3)
    concatenate1 = concatenate([upsampling1, crop1], axis=1)

    dropout1 = Dropout(0.25)(concatenate1)
    expand1 = Convolution3D(256, kernel_size=3, data_format='channels_first',
                            activation='relu', padding='valid', use_bias=True,
                            kernel_initializer='glorot_normal')(dropout1)
    expand1 = PReLU()(expand1)
    expand2 = Convolution3D(128, kernel_size=3, data_format='channels_first',
                            activation='relu', padding='valid', use_bias=True,
                            kernel_initializer='glorot_normal')(expand1)
    expand2 = PReLU()(expand2)
    expand3 = Convolution3D(128, kernel_size=3, data_format='channels_first',
                            activation='relu', padding='valid', use_bias=True,
                            kernel_initializer='glorot_normal')(expand2)
    expand3 = PReLU()(expand3)
    expand4 = Convolution3D(128, kernel_size=3, data_format='channels_first',
                            activation='relu', padding='valid', use_bias=True,
                            kernel_initializer='glorot_normal')(expand3)
    expand4 = PReLU()(expand4)
    expand5 = Convolution3D(128, kernel_size=3, data_format='channels_first',
                            activation='relu', padding='valid', use_bias=True,
                            kernel_initializer='glorot_normal')(expand4)
    expand5 = PReLU()(expand5)

    # Single-channel sigmoid output map.
    outputdata = Convolution3D(1, kernel_size=1, data_format='channels_first',
                               activation='sigmoid', padding='valid',
                               use_bias=True,
                               kernel_initializer='glorot_normal')(expand5)

    model = Model(inputs=inputdata, outputs=outputdata)
    model.compile(optimizer=Adam(lr=LR), loss=diceCoef)
    # BUGFIX: was the Python-2 statement `print model.summary()`, which is
    # a SyntaxError under Python 3 (the rest of this file uses print()).
    print(model.summary())
    return model
def build_model(self, use_bn=False):
    """
    Build the 4 layer 3D U-Net architecture as defined by Cicek et al:
    https://arxiv.org/abs/1606.06650
    """
    self._title = "UNet3D_brain_4layer" + ("_BN" if use_bn else "")

    net_input = Input((self._img_height, self._img_width,
                       self._num_slices, self._img_channels))

    def conv(tensor, n_filters, layer_name=None):
        # Standard 3x3x3 same-padded relu conv used throughout.
        return Conv3D(filters=n_filters, kernel_size=(3, 3, 3),
                      activation="relu", padding="same",
                      name=layer_name)(tensor)

    def maybe_bn(tensor):
        # BatchNormalization is applied on the contracting path only
        # when requested.
        return BatchNormalization()(tensor) if use_bn else tensor

    def contract(tensor, f1, f2, stage):
        # One contracting stage: two convs (+optional BN) then 2x pooling.
        skip = maybe_bn(conv(tensor, f1, "contr{}_1".format(stage)))
        skip = maybe_bn(conv(skip, f2, "contr{}_2".format(stage)))
        pooled = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                              name="contr{}_mp".format(stage))(skip)
        return skip, pooled

    def expand(tensor, skip, n_filters):
        # One expansive stage: 2x upsample, channel-wise concat with the
        # skip tensor, then two convs (no BN, as in the original).
        upsampled = UpSampling3D(size=(2, 2, 2))(tensor)
        merged = concatenate([upsampled, skip], axis=4)
        out = conv(merged, n_filters)
        return conv(out, n_filters)

    # Contracting path.
    skip1, down1 = contract(net_input, 32, 64, 1)
    skip2, down2 = contract(down1, 64, 128, 2)
    skip3, down3 = contract(down2, 128, 256, 3)

    # "Bottom" layer.
    bottom = maybe_bn(conv(down3, 256, "bottom1"))
    bottom = maybe_bn(conv(bottom, 512, "bottom2"))

    # With "same" padding the skip tensors already match the upsampled
    # tensors, so the Cropping3D calls from the valid-padded variant of
    # this network are unnecessary and the skips are used directly.
    up3 = expand(bottom, skip3, 256)
    up2 = expand(up3, skip2, 128)
    up1 = expand(up2, skip1, 64)

    # Final 1x1x1 conv + per-voxel softmax over the class channels.
    logits = Conv3D(filters=self._num_classes, kernel_size=(1, 1, 1))(up1)
    probs = Activation("softmax")(logits)

    self._model = Model(inputs=net_input, outputs=probs)
    return self._model
def build_model_alt(self, num_layers, n_base_filters, deconvolution,
                    use_bn=False):
    """
    Create a 3D Unet model with a variable number of layers and initial number of filters
    :param num_layers: number of layers (i.e. number of skip connections + 1)
    :param n_base_filters: number of filters to use in the first conv layer
    :param deconvolution: True for Deconvolution3D, False for UpSampling3D
    :param use_bn: True to use BatchNormalisation, False otherwise
    :return: Keras model
    """
    # Fixed hyper-parameters shared by every stage of the network.
    POOL_SIZE = (2, 2, 2)
    POOL_STRIDE = (2, 2, 2)
    CONV_KERNEL = (3, 3, 3)
    CONV_STRIDE = (1, 1, 1)
    DECONV_KERNEL = (2, 2, 2)
    DECONV_STRIDE = (2, 2, 2)
    UPSAMPLE_SIZE = (2, 2, 2)
    FEATURE_AXIS = -1  # channels-last feature axis

    self._title = "UNet3D_{}layer_{}flt_deconv{}".format(
        num_layers, n_base_filters, int(deconvolution))
    self._title += "_BN" if use_bn else ""

    # self._input is expected to be a Keras Input tensor prepared by the
    # enclosing class — TODO(review): confirm where it is created.
    inputs = self._input
    current_layer = inputs
    # Bookkeeping: per contracting stage we keep [conv1, conv2(, pooled)]
    # so the expanding path can reuse conv2 as the skip tensor.
    layers = list()

    # Contracting path
    for layer_ix in range(num_layers):
        # Two conv layers, note the difference in the number of filters
        contr_conv1 = Conv3D(filters=n_base_filters * (2**layer_ix),
                             kernel_size=CONV_KERNEL,
                             strides=CONV_STRIDE,
                             padding="same",
                             activation="relu",
                             kernel_initializer="he_normal")(current_layer)
        if use_bn:
            contr_conv1 = BatchNormalization(
                axis=FEATURE_AXIS)(contr_conv1)
        contr_conv2 = Conv3D(filters=n_base_filters * (2**layer_ix) * 2,
                             kernel_size=CONV_KERNEL,
                             strides=CONV_STRIDE,
                             padding="same",
                             activation="relu",
                             kernel_initializer="he_normal")(contr_conv1)
        if use_bn:
            contr_conv2 = BatchNormalization(
                axis=FEATURE_AXIS)(contr_conv2)
        # Do not include maxpooling in the final bottom layer
        if layer_ix < num_layers - 1:
            current_layer = MaxPooling3D(pool_size=POOL_SIZE,
                                         strides=POOL_STRIDE,
                                         padding="same")(contr_conv2)
            layers.append([contr_conv1, contr_conv2, current_layer])
        else:
            current_layer = contr_conv2
            layers.append([contr_conv1, contr_conv2])

    # Expanding path — walks the contracting stages in reverse,
    # skipping the bottom stage itself.
    for layer_ix in range(num_layers - 2, -1, -1):
        # NOTE(review): `._keras_shape` only exists in old standalone
        # Keras (pre tf.keras); under tf.keras 2.x this raises
        # AttributeError — confirm the Keras version this targets.
        if deconvolution:
            exp_deconv = Deconvolution3D(
                filters=current_layer._keras_shape[-1],
                kernel_size=DECONV_KERNEL,
                strides=DECONV_STRIDE)(current_layer)
        else:
            exp_deconv = UpSampling3D(size=UPSAMPLE_SIZE)(current_layer)
        # Concatenate with the second conv of the matching contracting
        # stage (the skip connection).
        concat_layer = Concatenate(axis=FEATURE_AXIS)(
            [exp_deconv, layers[layer_ix][1]])
        current_layer = Conv3D(
            filters=layers[layer_ix][1]._keras_shape[FEATURE_AXIS],
            kernel_size=CONV_KERNEL,
            strides=CONV_STRIDE,
            padding="same",
            activation="relu",
            kernel_initializer="he_normal")(concat_layer)
        if use_bn:
            current_layer = BatchNormalization(
                axis=FEATURE_AXIS)(current_layer)
        current_layer = Conv3D(
            filters=layers[layer_ix][1]._keras_shape[FEATURE_AXIS],
            kernel_size=CONV_KERNEL,
            strides=CONV_STRIDE,
            padding="same",
            activation="relu",
            kernel_initializer="he_normal")(current_layer)
        if use_bn:
            current_layer = BatchNormalization(
                axis=FEATURE_AXIS)(current_layer)

    # Per-voxel softmax over self._num_classes channels.
    act = Conv3D(self._num_classes, (1, 1, 1),
                 activation="softmax",
                 padding="same",
                 kernel_initializer="he_normal")(current_layer)
    self._model = Model(inputs=[inputs], outputs=[act])
    return self._model
def unet_model_MultiScale():
    """Build a multi-scale 3D U-Net.

    Besides the usual encoder/decoder, level 3 is computed at two scales
    (2x and 4x pooled from conv2) and the two branches are fused before
    decoding. Relies on module-level globals: ``config`` (input_shape,
    pool_size, n_labels, initial_learning_rate), ``out_w``,
    ``dice_coef_loss`` and ``dice_coef`` — TODO(review): confirm these
    are defined where this module is used.
    """
    inputs = Input(config["input_shape"])
    # Level 1: axis=1 in BatchNormalization implies channels-first data
    # — TODO(review): confirm the backend image data format.
    conv1 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(inputs)
    conv1 = Conv3D(32, (3, 3, 3), padding='same')(conv1)
    conv1 = normalization.BatchNormalization(epsilon=2e-05, axis=1,
                                             momentum=0.9, weights=None,
                                             beta_initializer='zero',
                                             gamma_initializer='one')(conv1)
    conv1 = core.Activation('relu')(conv1)
    pool1 = MaxPooling3D(pool_size=config["pool_size"])(conv1)

    # Level 2
    conv2 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(pool1)
    conv2 = Conv3D(64, (3, 3, 3), padding='same')(conv2)
    conv2 = normalization.BatchNormalization(epsilon=2e-05, axis=1,
                                             momentum=0.9, weights=None,
                                             beta_initializer='zero',
                                             gamma_initializer='one')(conv2)
    conv2 = core.Activation('relu')(conv2)

    # Level 3, branch 1: pooled by config["pool_size"].
    pool2_1 = MaxPooling3D(pool_size=config["pool_size"])(conv2)
    conv3_1 = Conv3D(128, (3, 3, 3), activation='relu',
                     padding='same')(pool2_1)
    conv3_1 = Conv3D(128, (3, 3, 3), activation='relu',
                     padding='same')(conv3_1)
    # Level 3, branch 2: coarser 4x pooling of the same conv2 features.
    pool2_2 = MaxPooling3D(pool_size=(4, 4, 4))(conv2)
    conv3_2 = Conv3D(128, (3, 3, 3), activation='relu',
                     padding='same')(pool2_2)
    conv3_2 = Conv3D(128, (3, 3, 3), activation='relu',
                     padding='same')(conv3_2)

    # Fuse the two scales (coarse branch upsampled back) on the channel
    # axis (axis=1, channels-first).
    fuse = concatenate(
        [UpSampling3D(size=config["pool_size"])(conv3_2), conv3_1], axis=1)
    conv3_f = Conv3D(128, (3, 3, 3), activation='relu', padding='same')(fuse)

    # Decoder with skip connections to conv2 and conv1.
    up4 = concatenate([UpSampling3D(size=config["pool_size"])(conv3_f),
                       conv2], axis=1)
    conv4 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(up4)
    conv4 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(conv4)
    up5 = concatenate([UpSampling3D(size=config["pool_size"])(conv4),
                       conv1], axis=1)
    # NOTE(review): these two convs use 'valid' padding, shrinking the
    # output; presumably out_w accounts for that — verify against out_w.
    conv5 = Conv3D(32, (3, 3, 3), activation='relu', padding='valid')(up5)
    conv5 = Conv3D(32, (3, 3, 3), activation='relu', padding='valid')(conv5)

    # Per-voxel predictions flattened to (voxels, n_labels) order.
    conv6 = Conv3D(config["n_labels"], (1, 1, 1))(conv5)
    conv6 = core.Reshape((1, out_w * out_w * out_w))(conv6)
    conv6 = core.Permute((2, 1))(conv6)
    #conv6 =
    # NOTE(review): `act` must stay live — it is the model output below.
    act = Activation('sigmoid')(conv6)
    model = Model(inputs=inputs, outputs=act)
    #model.compile(optimizer=Adam(lr=config["initial_learning_rate"]), loss='categorical_crossentropy',metrics=['fbeta_score'])
    model.compile(optimizer=Adam(lr=config["initial_learning_rate"]),
                  loss=dice_coef_loss, metrics=[dice_coef])
    return model
def dense_net(nb_layers, growth_rate=12, nb_filter=64, bottleneck=True,
              reduction=0.1, dropout_rate=None,
              subsample_initial_block=True, classes=2):
    """Build a 3D DenseNet classifier over (280, 280, 16, 1) volumes.

    :param nb_layers: list giving the number of conv blocks in each
        dense block, e.g. [3, 3, 3] -> 3 dense blocks of 3 blocks each
    :param growth_rate: channels added by each conv block
    :param nb_filter: number of filters of the initial convolution
    :param bottleneck: use bottleneck 1x1 convs inside dense blocks
    :param reduction: channel reduction factor in transition blocks
    :param dropout_rate: dropout inside dense blocks (None = no dropout)
    :param subsample_initial_block: use a strided 7x7x7 stem + maxpool
    :param classes: output classes (1 -> sigmoid, else softmax)
    :return: the (uncompiled) Keras model
    """
    inputs = Input(shape=(280, 280, 16, 1), name='input')
    print("0 :inputs shape:", inputs.shape)
    # nb_layers gives the number of conv blocks per dense block, e.g. [3,3,3].
    concat_axis = -1  # axis along which dense-block features are concatenated
    bn_axis = -1  # axis for BatchNormalization (channels-last)
    # Number of dense blocks; must match len(nb_layers): with [3,3,3]
    # there are 3 stages of 3 conv blocks each.
    nb_dense_block = nb_layers.__len__()
    final_nb_layer = nb_layers[-1]
    # Transition-block channel compression: actual output channels =
    # input channels * compression.
    compression = 1.0 - reduction

    # Initial convolution =======================================================================================
    if subsample_initial_block:
        initial_kernel = (7, 7, 7)
        # TODO: run an experiment to see whether strides (2,2,2) or
        # (2,2,1) works better here.
        initial_strides = (2, 2, 2)
    else:
        initial_kernel = (3, 3, 3)
        initial_strides = (1, 1, 1)
    x = Conv3D(nb_filter, initial_kernel, kernel_initializer='he_normal',
               padding='same', strides=initial_strides, use_bias=False,
               name='init_conv')(inputs)
    x = BatchNormalization(axis=bn_axis, epsilon=1.1e-5, name='init_bn')(x)
    x = Activation('relu')(x)
    if subsample_initial_block:
        x = MaxPooling3D((3, 3, 3), strides=(2, 2, 2), padding='same')(x)
    print("0 :Initial conv shape:", x.shape)
    # Initial convolution finished ================================================================================

    # Add dense blocks start ==================================================================================
    # Each dense block (except the last) is followed by a transition
    # block that downsamples and compresses channels.
    for block_idx in range(nb_dense_block - 1):
        #print('db:','db'+str(block_idx))
        x, nb_filter = __dense_block(x, nb_layers[block_idx], nb_filter,
                                     growth_rate, concat_axis=concat_axis,
                                     bn_axis=bn_axis, bottleneck=bottleneck,
                                     dropout_rate=dropout_rate,
                                     grow_nb_filters=True,
                                     dbname='db' + str(block_idx))
        print(block_idx + 1, ":dense_block shape:", x.shape)
        x = __transition_block(x, nb_filter, compression=compression,
                               concat_axis=concat_axis, bias_allow=False,
                               tbname='tb' + str(block_idx))
        print(block_idx + 1, ":transition_block shape:", x.shape)
        nb_filter = int(nb_filter * compression)
    # Add dense blocks finish ==================================================================================

    # The last dense_block does not have a transition_block
    x, nb_filter = __dense_block(x, final_nb_layer, nb_filter, growth_rate,
                                 concat_axis=concat_axis, bn_axis=bn_axis,
                                 bottleneck=bottleneck,
                                 dropout_rate=dropout_rate,
                                 grow_nb_filters=True, dbname='db_last')
    print(nb_dense_block, ":dense_block shape:", x.shape)
    x = BatchNormalization(axis=bn_axis, epsilon=1.1e-5, name='bn_last')(x)
    x = Activation('relu')(x)

    # Global average pooling + dropout before the classifier head.
    out = GlobalAveragePooling3D(data_format='channels_last')(x)
    print("GApooling shape:", out.shape)
    out_drop = Dropout(rate=0.3)(out)
    if classes == 1:
        output = Dense(classes, activation='sigmoid', name='fc1')(out_drop)
        print("predictions1 shape:", output.shape, 'activition:sigmoid')
    else:
        output = Dense(classes, activation='softmax', name='fc1')(out_drop)
        print("predictions2 shape:", output.shape, 'activition:softmax')
    #out = Dense(classes, name='fc1')(out_drop)
    #print("out shape:", out.shape)
    #output = Activation(activation='sigmoid')(out)

    # NOTE(review): Model(input=..., output=...) is the removed Keras 1
    # keyword spelling — newer Keras requires inputs=/outputs=. Confirm
    # the Keras version before upgrading.
    model = Model(input=inputs, output=output)
    #mean_squared_logarithmic_error or binary_crossentropy
    #model.compile(optimizer=SGD(lr=1e-6, momentum=0.9), loss=EuiLoss, metrics=[y_t, y_pre, Acc] )
    return model
def segmentation_model():
    """
    3D U-net model, using very small convolutional kernels

    Input is a (144, 192, 128, 2) two-channel volume; output has
    ``n_tissues`` softmax channels per voxel (n_tissues is a
    module-level global).
    """
    conv_size = (3, 3, 3)
    pool_size = (2, 2, 2)

    inputs = Input(shape=(144, 192, 128, 2))

    # Encoder: conv-conv-BN-pool at each scale.
    conv1 = Conv3D(16, conv_size, activation='relu', padding='same')(inputs)
    conv1 = Conv3D(16, conv_size, activation='relu', padding='same')(conv1)
    bn1 = BatchNormalization()(conv1)
    pool1 = MaxPooling3D(pool_size=pool_size)(bn1)

    conv2 = Conv3D(32, conv_size, activation='relu', padding='same')(pool1)
    conv2 = Conv3D(32, conv_size, activation='relu', padding='same')(conv2)
    bn2 = BatchNormalization()(conv2)
    pool2 = MaxPooling3D(pool_size=pool_size)(bn2)

    conv3 = Conv3D(64, conv_size, activation='relu', padding='same')(pool2)
    conv3 = Conv3D(64, conv_size, activation='relu', padding='same')(conv3)
    bn3 = BatchNormalization()(conv3)
    pool3 = MaxPooling3D(pool_size=pool_size)(bn3)

    # Deeper stages add 0.5 dropout after each conv.
    conv4 = Conv3D(128, conv_size, activation='relu', padding='same')(pool3)
    drop4 = Dropout(0.5)(conv4)
    conv4 = Conv3D(128, conv_size, activation='relu', padding='same')(drop4)
    drop4 = Dropout(0.5)(conv4)
    bn4 = BatchNormalization()(drop4)
    pool4 = MaxPooling3D(pool_size=pool_size)(bn4)

    conv5 = Conv3D(128, conv_size, activation='relu', padding='same')(pool4)
    drop5 = Dropout(0.5)(conv5)
    conv5 = Conv3D(128, conv_size, activation='relu', padding='same')(drop5)
    drop5 = Dropout(0.5)(conv5)
    bn5 = BatchNormalization()(drop5)

    # Decoder: upsample, concat skip, conv-conv-BN.
    up8 = UpSampling3D(size=pool_size)(bn5)
    concat8 = concatenate([up8, bn4])
    conv8 = Conv3D(64, conv_size, activation='relu', padding='same')(concat8)
    conv8 = Conv3D(64, conv_size, activation='relu', padding='same')(conv8)
    bn8 = BatchNormalization()(conv8)

    # BUGFIX: this previously upsampled bn4, which left conv8/bn8
    # disconnected dead ends (bn4 and bn8 share a spatial shape, so the
    # graph still built silently). The decoder must continue from bn8.
    up9 = UpSampling3D(size=pool_size)(bn8)
    concat9 = concatenate([up9, bn3])
    conv9 = Conv3D(64, conv_size, activation='relu', padding='same')(concat9)
    conv9 = Conv3D(64, conv_size, activation='relu', padding='same')(conv9)
    bn9 = BatchNormalization()(conv9)

    up10 = UpSampling3D(size=pool_size)(bn9)
    concat10 = concatenate([up10, bn2])
    conv10 = Conv3D(32, conv_size, activation='relu',
                    padding='same')(concat10)
    conv10 = Conv3D(32, conv_size, activation='relu', padding='same')(conv10)
    bn10 = BatchNormalization()(conv10)

    up11 = UpSampling3D(size=pool_size)(bn10)
    concat11 = concatenate([up11, bn1])
    conv11 = Conv3D(16, conv_size, activation='relu',
                    padding='same')(concat11)
    bn11 = BatchNormalization()(conv11)

    # need as many output channel as tissue classes
    conv14 = Conv3D(n_tissues, (1, 1, 1), activation='softmax',
                    padding='valid')(bn11)

    model = Model(input=[inputs], output=[conv14])
    return model
padding='valid', activation="relu", data_format='channels_first')(inputs_layer) conv_layer2 = Convolution3D(filters=64, padding='valid', kernel_size=(3, 3, 3), activation="relu", data_format='channels_first')(conv_layer1) # Dropout = 0.2 dropout_layer2 = Dropout(0.2)(conv_layer2) # MaxPooling3D = 2 pooling_drop2 = MaxPooling3D(pool_size=(2, 2, 2), data_format='channels_first', padding='same')(dropout_layer2) # Dropout = 0.4 dropout_pooling2 = Dropout(0.4)(pooling_drop2) # Flatten flatten_layer = Flatten()(dropout_pooling2) # Fully connected = 512 dense_drop2 = Dense(512, activation='relu')(flatten_layer) # Dropout = 0.4 dropout_dense2 = Dropout(0.4)(dense_drop2) # Output
Conv3D(input_shape=(32, 32, 32, 1), filters=32, kernel_size=(5, 5, 5), strides=(2, 2, 2))) model.add( Conv3D(32, kernel_size=(3, 3, 3), activation='relu', input_shape=(30, 30, 30, 1))) model.add(Conv3D(64, (3, 3, 3), activation='relu')) model.add(Activation(LeakyReLU(alpha=0.1))) #model.add(MaxPooling3D(pool_size=(2, 2, 2))) model.add(Dropout(rate=0.3)) model.add(Conv3D(filters=30, kernel_size=(3, 3, 3))) model.add(Activation(LeakyReLU(alpha=0.1))) model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=None)) model.add(Dropout(rate=0.4)) model.add(Flatten()) model.add(Dense(units=128, activation='relu')) model.add(Dropout(rate=0.5)) model.add(Dense(units=40, kernel_initializer='normal', activation='relu')) model.add(Activation("softmax")) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=["accuracy"]) #model.load_weights("modelnet10.npz") print(model.summary()) X_train = X_train.reshape(-1, 32, 32, 32) #x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
def get_3d_cnn():
    """Build a 3D encoder/decoder CNN (U-Net shaped, but with NO skip
    connections — the decoder consumes only the bottleneck features).

    Input size comes from the module-level ``cm`` config
    (slices_3d, img_rows_3d, img_cols_3d); output is a 3-channel
    sigmoid map. NOTE(review): ``border_mode=`` and
    ``Model(input=..., output=...)`` are Keras 1 spellings — confirm
    the installed Keras version before upgrading.
    """
    inputs = Input((cm.slices_3d, cm.img_rows_3d, cm.img_cols_3d, 1),
                   name='layer_no_0_input')

    # Encoder: two convs then 2x pooling at each of four scales.
    conv1 = Conv3D(filters=32, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same',
                   name='layer_no_1_conv')(inputs)
    conv1 = Conv3D(filters=32, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same',
                   name='layer_no_2_conv')(conv1)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2), name='layer_no_3')(conv1)
    conv2 = Conv3D(filters=64, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same',
                   name='layer_no_4_conv')(pool1)
    conv2 = Conv3D(filters=64, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same',
                   name='layer_no_5_conv')(conv2)
    pool2 = MaxPooling3D(pool_size=(2, 2, 2), name='layer_no_6')(conv2)
    conv3 = Conv3D(filters=128, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same',
                   name='layer_no_7_conv')(pool2)
    conv3 = Conv3D(filters=128, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same',
                   name='layer_no_8_conv')(conv3)
    pool3 = MaxPooling3D(pool_size=(2, 2, 2), name='layer_no_9')(conv3)
    conv4 = Conv3D(filters=256, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same',
                   name='layer_no_10_conv')(pool3)
    conv4 = Conv3D(filters=256, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same',
                   name='layer_no_11_conv')(conv4)
    pool4 = MaxPooling3D(pool_size=(2, 2, 2), name='layer_no_12')(conv4)

    # Bottleneck.
    conv5 = Conv3D(filters=512, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same',
                   name='layer_no_13_conv')(pool4)
    conv5 = Conv3D(filters=512, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same',
                   name='layer_no_14_conv')(conv5)

    # Decoder: upsample then two convs at each scale (no concatenation
    # with the encoder tensors).
    up6 = UpSampling3D(size=(2, 2, 2), name='layer_no_15')(conv5)
    conv6 = Conv3D(filters=256, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same',
                   name='layer_no_17_conv')(up6)
    conv6 = Conv3D(filters=256, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same',
                   name='layer_no_18_conv')(conv6)
    up7 = UpSampling3D(size=(2, 2, 2), name='layer_no_19')(conv6)
    conv7 = Conv3D(filters=128, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same',
                   name='layer_no_21_conv')(up7)
    conv7 = Conv3D(filters=128, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same',
                   name='layer_no_22_conv')(conv7)
    up8 = UpSampling3D(size=(2, 2, 2), name='layer_no_23')(conv7)
    conv8 = Conv3D(filters=64, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same',
                   name='layer_no_25_conv')(up8)
    conv8 = Conv3D(filters=64, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same',
                   name='layer_no_26_conv')(conv8)
    up9 = UpSampling3D(size=(2, 2, 2), name='layer_no_27')(conv8)
    conv9 = Conv3D(filters=32, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same',
                   name='layer_no_29_conv')(up9)
    conv9 = Conv3D(filters=32, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same',
                   name='layer_no_30_last')(conv9)

    # 3-channel sigmoid output map.
    conv10 = Conv3D(filters=3, kernel_size=(1, 1, 1), strides=(1, 1, 1),
                    activation='sigmoid', name='layer_no_31_output')(conv9)

    model = Model(input=inputs, output=conv10)
    # weights = np.array([1.0, 1.0, 1.0])
    # loss = lf.weighted_categorical_crossentropy_loss(weights)
    # model.compile(optimizer=Adam(lr=1.0e-5), loss="categorical_crossentropy", metrics=["categorical_accuracy"])
    # model.compile(optimizer=Adam(lr=1.0e-5), loss=loss, metrics=["categorical_accuracy"])
    model.compile(optimizer=Adam(lr=1.0e-5), loss="categorical_crossentropy",
                  metrics=["categorical_accuracy"])
    # model.compile(optimizer=Adam(lr=1.0e-5), loss=lf.binary_crossentropy_loss, metrics=[lf.binary_crossentropy])
    return model
# test visualize shp = list(x_train.shape) shp = shp[2:] input_dim = x_train.shape[1:] # visualize_results(x_train[0, 0, :, :, :], y_train[0, 0, :, :, :], y_train[0, 0, :, :, :], shp) ######################### encoding/decoding - segmentation ######################### conv_channel_1 = 8 conv_channel_2 = 20 kern_size = 3 segmen_patches = Input(shape=input_dim) x0 = Conv3D(conv_channel_1, kernel_size=kern_size, input_shape=input_dim, data_format='channels_first', padding='same', activation='relu')(segmen_patches) x1 = MaxPooling3D((2, 2, 2), data_format='channels_first')(x0) x2 = Conv3D(conv_channel_2, kernel_size=kern_size, data_format='channels_first', padding='same', activation='relu')(x1) encoded_semantic = MaxPooling3D((2, 2, 2), data_format='channels_first')(x2) x3 = Conv3D(conv_channel_2, kernel_size=kern_size, data_format='channels_first', padding='same', activation='relu')(encoded_semantic) x4 = UpSampling3D(size=(2, 2, 2), data_format='channels_first')(x3) x5 = Concatenate(axis=1)([x4, x2]) x6 = Conv3D(conv_channel_1, kernel_size=kern_size, data_format='channels_first', padding='same', activation='relu')(x5) x7 = UpSampling3D(size=(2, 2, 2), data_format='channels_first')(x6) x8 = Concatenate(axis=1)([x7, x0]) segmen_recons = Conv3D(1, kernel_size=kern_size, data_format='channels_first', padding='same', activation='sigmoid')(x8) ######################### encoding/decoding - intensity ######################### intensity_patches = Input(shape=input_dim) x00 = Conv3D(conv_channel_1, kernel_size=kern_size, input_shape=input_dim,
def get_3d_wnet(opti):
    """Build a 3D "W-Net": two parallel encoder branches (plain and
    "p"-suffixed, both fed from the same input) whose features are
    jointly merged at every decoder stage, plus two parallel decoder
    conv stacks that are fused before the final 1x1x1 conv.

    :param opti: a Keras optimizer instance used in model.compile
    :return: compiled model (weighted categorical crossentropy over a
        3-channel sigmoid output)

    NOTE(review): ``merge(..., mode='concat')`` and
    ``Model(input=..., output=...)`` are Keras 1 APIs removed in
    Keras 2 — confirm the installed Keras version.
    """
    inputs = Input((cm.slices_3d, cm.img_rows_3d, cm.img_cols_3d, 1))

    # Encoder branch A, level 1.
    conv1 = Conv3D(filters=16, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same')(inputs)
    conv1 = Conv3D(filters=16, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1)
    # Encoder branch B ("p"), level 1 — same input, separate weights.
    conv1p = Conv3D(filters=16, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                    activation='relu', border_mode='same')(inputs)
    conv1p = Conv3D(filters=16, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                    activation='relu', border_mode='same')(conv1p)
    pool1p = MaxPooling3D(pool_size=(2, 2, 2))(conv1p)

    # Level 2, both branches.
    conv2 = Conv3D(filters=32, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same')(pool1)
    conv2 = Conv3D(filters=32, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same')(conv2)
    pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)
    conv2p = Conv3D(filters=32, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                    activation='relu', border_mode='same')(pool1p)
    conv2p = Conv3D(filters=32, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                    activation='relu', border_mode='same')(conv2p)
    pool2p = MaxPooling3D(pool_size=(2, 2, 2))(conv2p)

    # Level 3, both branches.
    conv3 = Conv3D(filters=64, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same')(pool2)
    conv3 = Conv3D(filters=64, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same')(conv3)
    pool3 = MaxPooling3D(pool_size=(2, 2, 2))(conv3)
    conv3p = Conv3D(filters=64, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                    activation='relu', border_mode='same')(pool2p)
    conv3p = Conv3D(filters=64, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                    activation='relu', border_mode='same')(conv3p)
    pool3p = MaxPooling3D(pool_size=(2, 2, 2))(conv3p)

    # Level 4, both branches.
    conv4 = Conv3D(filters=128, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same')(pool3)
    conv4 = Conv3D(filters=128, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same')(conv4)
    pool4 = MaxPooling3D(pool_size=(2, 2, 2))(conv4)
    conv4p = Conv3D(filters=128, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                    activation='relu', border_mode='same')(pool3p)
    conv4p = Conv3D(filters=128, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                    activation='relu', border_mode='same')(conv4p)
    pool4p = MaxPooling3D(pool_size=(2, 2, 2))(conv4p)

    # Bottleneck, both branches.
    conv5 = Conv3D(filters=256, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same')(pool4)
    conv5 = Conv3D(filters=256, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same')(conv5)
    conv5p = Conv3D(filters=256, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                    activation='relu', border_mode='same')(pool4p)
    conv5p = Conv3D(filters=256, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                    activation='relu', border_mode='same')(conv5p)

    # Decoder stage 6: both bottlenecks upsampled + both level-4 skips,
    # concatenated on the channel axis; then two parallel conv stacks
    # (conv6 and conv6p) consume the SAME merged tensor.
    up6 = merge([
        UpSampling3D(size=(2, 2, 2))(conv5), conv4,
        UpSampling3D(size=(2, 2, 2))(conv5p), conv4p
    ], mode='concat', concat_axis=-1)
    conv6 = Conv3D(filters=256, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same')(up6)
    conv6 = Conv3D(filters=128, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same')(conv6)
    conv6p = Conv3D(filters=256, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                    activation='relu', border_mode='same')(up6)
    conv6p = Conv3D(filters=128, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                    activation='relu', border_mode='same')(conv6p)

    # Decoder stage 7.
    up7 = merge([
        UpSampling3D(size=(2, 2, 2))(conv6), conv3,
        UpSampling3D(size=(2, 2, 2))(conv6p), conv3p
    ], mode='concat', concat_axis=-1)
    conv7 = Conv3D(filters=128, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same')(up7)
    conv7 = Conv3D(filters=64, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same')(conv7)
    conv7p = Conv3D(filters=128, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                    activation='relu', border_mode='same')(up7)
    conv7p = Conv3D(filters=64, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                    activation='relu', border_mode='same')(conv7p)

    # Decoder stage 8.
    up8 = merge([
        UpSampling3D(size=(2, 2, 2))(conv7), conv2,
        UpSampling3D(size=(2, 2, 2))(conv7p), conv2p
    ], mode='concat', concat_axis=-1)
    conv8 = Conv3D(filters=64, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same')(up8)
    conv8 = Conv3D(filters=32, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same')(conv8)
    conv8p = Conv3D(filters=64, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                    activation='relu', border_mode='same')(up8)
    conv8p = Conv3D(filters=32, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                    activation='relu', border_mode='same')(conv8p)

    # Decoder stage 9.
    up9 = merge([
        UpSampling3D(size=(2, 2, 2))(conv8), conv1,
        UpSampling3D(size=(2, 2, 2))(conv8p), conv1p
    ], mode='concat', concat_axis=-1)
    conv9 = Conv3D(filters=32, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same')(up9)
    conv9 = Conv3D(filters=16, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                   activation='relu', border_mode='same')(conv9)
    conv9p = Conv3D(filters=32, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                    activation='relu', border_mode='same')(up9)
    conv9p = Conv3D(filters=16, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                    activation='relu', border_mode='same')(conv9p)

    # Fuse the two decoder branches, then 1x1x1 conv to 3 channels.
    conv10a = merge([conv9, conv9p], mode='concat', concat_axis=-1)
    conv10 = Conv3D(filters=3, kernel_size=(1, 1, 1), strides=(1, 1, 1),
                    activation='sigmoid')(conv10a)

    model = Model(input=inputs, output=conv10)
    # Class weights heavily favour the two non-background channels.
    weights = np.array([1, 100, 100])
    loss = lf.weighted_categorical_crossentropy_loss(weights)
    # model.compile(optimizer=Adam(lr=1.0e-5), loss="categorical_crossentropy", metrics=["categorical_accuracy"])
    model.compile(optimizer=opti, loss=loss,
                  metrics=["categorical_accuracy"])
    # model.compile(optimizer=opti, loss="categorical_crossentropy", metrics=["categorical_accuracy"])
    return model
def build_model(self, img_shape=(32, 168, 168), learning_rate=5e-5):
    """Build and compile a 3-class 3D U-Net with per-class Dice heads.

    The softmax output is split into three single-channel outputs
    ('bg', 'z1', 'z2'), each trained with self.dice_loss and monitored
    with self.dice_coef.  Returns the compiled Keras model.
    """
    input_img = Input((*img_shape, 1), name='img_inp')
    init = 'he_normal'
    sfs = 8  # start filter size
    use_bn = True
    use_do = True

    # Encoder: two project-defined down blocks, then an explicit stage 3.
    conv1, conv1_b_m = self.downLayer(input_img, sfs, 1, use_bn)
    conv2, conv2_b_m = self.downLayer(conv1, sfs * 2, 2, use_bn)

    conv3 = Conv3D(sfs * 4, (3, 3, 3), activation='relu', padding='same',
                   kernel_initializer=init, name='conv3_1')(conv2)
    if use_bn:
        conv3 = BatchNormalization()(conv3)
    conv3 = Conv3D(sfs * 8, (3, 3, 3), activation='relu', padding='same',
                   kernel_initializer=init, name='conv3_2')(conv3)
    if use_bn:
        conv3 = BatchNormalization()(conv3)
    pool3 = MaxPooling3D(pool_size=(2, 2, 2))(conv3)

    # Bottleneck (stage 4) with dropout between the two convolutions.
    conv4 = Conv3D(sfs * 16, (3, 3, 3), activation='relu', padding='same',
                   kernel_initializer=init, name='conv4_1')(pool3)
    if use_bn:
        conv4 = BatchNormalization()(conv4)
    if use_do:
        conv4 = Dropout(0.5, seed=4, name='Dropout_4')(conv4)
    conv4 = Conv3D(sfs * 16, (3, 3, 3), activation='relu', padding='same',
                   kernel_initializer=init, name='conv4_2')(conv4)
    if use_bn:
        conv4 = BatchNormalization()(conv4)

    # Decoder stage 5 written inline (mirrors upLayer with dropout).
    up1 = Conv3DTranspose(sfs * 16, (2, 2, 2), strides=(2, 2, 2),
                          activation='relu', padding='same',
                          name='up5')(conv4)
    up1 = concatenate([up1, conv3])
    conv5 = Conv3D(sfs * 8, (3, 3, 3), activation='relu', padding='same',
                   kernel_initializer=init, name='conv5_1')(up1)
    if use_bn:
        conv5 = BatchNormalization()(conv5)
    if use_do:
        conv5 = Dropout(0.5, seed=5, name='Dropout_5')(conv5)
    conv5 = Conv3D(sfs * 8, (3, 3, 3), activation='relu', padding='same',
                   kernel_initializer=init, name='conv5_2')(conv5)
    if use_bn:
        conv5 = BatchNormalization()(conv5)

    conv6 = self.upLayer(conv5, conv2_b_m, sfs * 8, 6, use_bn, use_do)
    conv7 = self.upLayer(conv6, conv1_b_m, sfs * 4, 7, use_bn, use_do)

    # Final 3-way softmax, then split the channel axis into three outputs.
    conv_out = Conv3D(3, (1, 1, 1), activation='softmax',
                      name='conv_final_softmax')(conv7)
    bg = Lambda(lambda x: x[:, :, :, :, 0], name='bg')(conv_out)
    z1 = Lambda(lambda x: x[:, :, :, :, 1], name='z1')(conv_out)
    z2 = Lambda(lambda x: x[:, :, :, :, 2], name='z2')(conv_out)

    optimizer = Adam(lr=learning_rate)  # TODO: settings of optimizer
    p_model = Model(input_img, [bg, z1, z2])
    p_model.compile(optimizer=optimizer,
                    loss={'bg': self.dice_loss,
                          'z1': self.dice_loss,
                          'z2': self.dice_loss},
                    metrics={'bg': self.dice_coef,
                             'z1': self.dice_coef,
                             'z2': self.dice_coef})
    return p_model
def get_unet():
    """Classic 3D U-Net (32 -> 512 filters) with a single sigmoid output.

    Input shape comes from the module-level img_depth/img_rows/img_cols
    globals.  Returns the compiled Keras model (Adam + binary
    cross-entropy).
    """

    def _double_conv(tensor, filters):
        # Two consecutive 3x3x3 ReLU convolutions at the same resolution.
        tensor = Conv3D(filters, (3, 3, 3), activation='relu',
                        padding='same')(tensor)
        return Conv3D(filters, (3, 3, 3), activation='relu',
                      padding='same')(tensor)

    inputs = Input(shape=(img_depth, img_rows, img_cols, 1))

    # Contracting path; remember each stage output for the skip links.
    skips = []
    x = inputs
    for filters in (32, 64, 128, 256):
        x = _double_conv(x, filters)
        skips.append(x)
        x = MaxPooling3D(pool_size=(2, 2, 2))(x)

    # Bottleneck.
    x = _double_conv(x, 512)

    # Expanding path: transpose-conv upsample, concat skip, convolve.
    for filters, skip in zip((256, 128, 64, 32), reversed(skips)):
        x = concatenate([
            Conv3DTranspose(filters, (2, 2, 2), strides=(2, 2, 2),
                            padding='same')(x), skip
        ], axis=4)
        x = _double_conv(x, filters)

    outputs = Conv3D(1, (1, 1, 1), activation='sigmoid')(x)

    model = Model(inputs=[inputs], outputs=[outputs])
    model.summary()
    # don't mix tensor flow API and Keras API
    model.compile(optimizer=Adam(lr=1e-5, beta_1=0.9, beta_2=0.999,
                                 epsilon=1e-08, decay=0.000000199),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
def model1():
    """3D U-Net-style segmentation model with an UpSampling3D decoder.

    Input shape is taken from the module-level dataset_image_size_*
    globals; the output is a single-channel sigmoid volume.  Compiled
    with Adam ('adam' defaults) and binary cross-entropy.

    Fixes vs. the original: the input tensor no longer shadows the
    builtin `input`; `Model` is built with the current `inputs=`/
    `outputs=` keywords instead of the removed `input=`/`output=`; the
    `input_shape=` kwarg on the first Conv3D was dropped (it is ignored
    when a layer is called on an existing tensor).
    """
    # Input layer
    inputs = Input(shape=(dataset_image_size_x, dataset_image_size_y,
                          dataset_image_size_z, 1))

    # convolutional layer 1
    conv_1 = Conv3D(32, (3, 3, 3), activation='relu', padding='same',
                    name="conv_1_c1")(inputs)
    conv_1 = Conv3D(32, (3, 3, 3), activation='relu', padding='same',
                    name="conv_1_c2")(conv_1)
    pool_1 = MaxPooling3D((2, 2, 2), name="pool_1_p1")(conv_1)

    # convolutional layer 2
    conv_2 = Conv3D(64, (3, 3, 3), activation='relu', padding='same',
                    name="conv_2_c1")(pool_1)
    conv_2 = Conv3D(64, (3, 3, 3), activation='relu', padding='same',
                    name="conv_2_c2")(conv_2)
    pool_2 = MaxPooling3D((2, 2, 2), name="pool_2_p1")(conv_2)

    # convolutional layer 3
    conv_3 = Conv3D(128, (3, 3, 3), activation='relu', padding='same',
                    name="conv_3_c1")(pool_2)
    conv_3 = Conv3D(128, (3, 3, 3), activation='relu', padding='same',
                    name="conv_3_c2")(conv_3)
    pool_3 = MaxPooling3D((2, 2, 2), name="pool_3_p1")(conv_3)

    # convolutional layer 4 (dropout before pooling)
    conv_4 = Conv3D(256, (3, 3, 3), activation='relu', padding='same',
                    name="conv_4_c1")(pool_3)
    conv_4 = Conv3D(256, (3, 3, 3), activation='relu', padding='same',
                    name="conv_4_c2")(conv_4)
    conv_4 = Dropout(0.4, name="conv_4_d1")(conv_4)
    pool_4 = MaxPooling3D((2, 2, 2), name="pool_4_p1")(conv_4)

    # convolutional layer 5 (bottleneck)
    conv_5 = Conv3D(512, (3, 3, 3), activation='relu', padding='same',
                    name="conv_5_c1")(pool_4)
    conv_5 = Conv3D(512, (3, 3, 3), activation='relu', padding='same',
                    name="conv_5_c2")(conv_5)
    conv_5 = Dropout(0.4, name="conv_5_d1")(conv_5)

    # Up sampling 5 + merge with layer 4
    up_5 = UpSampling3D((2, 2, 2), name="up_5_u1")(conv_5)
    up_5 = Conv3D(256, (3, 3, 3), activation='relu', padding='same',
                  name="up_5_c1")(up_5)
    merge_4 = concatenate([conv_4, up_5], axis=4, name="merge_4")

    # convolutional layer 6
    conv_6 = Conv3D(256, (3, 3, 3), activation='relu', padding='same',
                    name="conv_6_c1")(merge_4)
    conv_6 = Conv3D(256, (3, 3, 3), activation='relu', padding='same',
                    name="conv_6_c2")(conv_6)

    # Up sampling 4 + merge with layer 3
    up_4 = UpSampling3D((2, 2, 2), name="up_4_u1")(conv_6)
    up_4 = Conv3D(128, (3, 3, 3), activation='relu', padding='same',
                  name="up_4_c1")(up_4)
    merge_3 = concatenate([conv_3, up_4], axis=4, name="merge_3")

    # convolutional layer 7
    conv_7 = Conv3D(128, (3, 3, 3), activation='relu', padding='same',
                    name="conv_7_c1")(merge_3)
    conv_7 = Conv3D(128, (3, 3, 3), activation='relu', padding='same',
                    name="conv_7_c2")(conv_7)

    # Up sampling 3 + merge with layer 2
    up_3 = UpSampling3D((2, 2, 2), name="up_3_u1")(conv_7)
    up_3 = Conv3D(64, (3, 3, 3), activation='relu', padding='same',
                  name="up_3_c1")(up_3)
    merge_2 = concatenate([conv_2, up_3], axis=4, name="merge_2")

    # convolutional layer 8
    conv_8 = Conv3D(64, (3, 3, 3), activation='relu', padding='same',
                    name="conv_8_c1")(merge_2)
    conv_8 = Conv3D(64, (3, 3, 3), activation='relu', padding='same',
                    name="conv_8_c2")(conv_8)

    # Up sampling 2 + merge with layer 1
    up_2 = UpSampling3D((2, 2, 2), name="up_2_u1")(conv_8)
    up_2 = Conv3D(32, (3, 3, 3), activation='relu', padding='same',
                  name="up_2_c1")(up_2)
    merge_1 = concatenate([conv_1, up_2], axis=4, name="merge_1")

    # convolutional layer 9
    conv_9 = Conv3D(32, (3, 3, 3), activation='relu', padding='same',
                    name="conv_9_c1")(merge_1)
    conv_9 = Conv3D(32, (3, 3, 3), activation='relu', padding='same',
                    name="conv_9_c2")(conv_9)
    conv_9 = Conv3D(2, (3, 3, 3), activation='relu', padding='same',
                    name="conv_9_c3")(conv_9)

    # convolutional layer 10: final 1x1x1 sigmoid projection
    conv_10 = Conv3D(1, 1, activation='sigmoid', name="conv_10_c1")(conv_9)

    model = Model(inputs=inputs, outputs=conv_10)
    model.compile(optimizer='adam', loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
def resnet_3D_v1(input_dim, mode='train'):
    """ResNet-style 3D backbone; returns (input tensor, feature tensor).

    Note: `mode` currently has no effect — the original built the same
    Input in both branches, so a single Input is created here.
    """
    # NOTE(review): axis 3 is not the channel axis of a 5D channels-last
    # tensor (that would be 4/-1) — preserved as-is; confirm the intended
    # data layout before changing.
    bn_axis = 3

    inputs = Input(shape=input_dim, name='input')

    # --- Convolution Block 1: 1x1x1 stem + BN + ReLU + (size-1) pool ---
    x1 = Conv3D(64, (1, 1, 1),
                kernel_initializer='orthogonal',
                use_bias=False,
                trainable=True,
                kernel_regularizer=l2(weight_decay),
                padding='same',
                name='conv1_1/3x3_s1')(inputs)
    x1 = BatchNormalization(axis=bn_axis, name='conv1_1/3x3_s1/bn',
                            trainable=True)(x1)
    x1 = Activation('relu')(x1)
    x1 = MaxPooling3D((1, 1, 1), strides=(1, 1, 1))(x1)

    # --- Convolution Section 2 ---
    x2 = conv_block_3D(x1, 3, [48, 48, 96], stage=2, block='a',
                       strides=(1, 1, 1), trainable=True)
    x2 = identity_block_3D(x2, 3, [48, 48, 96], stage=2, block='b',
                           trainable=True)

    # --- Convolution Section 3 ---
    x3 = conv_block_3D(x2, 3, [96, 96, 128], stage=3, block='a',
                       trainable=True)
    x3 = identity_block_3D(x3, 3, [96, 96, 128], stage=3, block='b',
                           trainable=True)
    x3 = identity_block_3D(x3, 3, [96, 96, 128], stage=3, block='c',
                           trainable=True)

    # --- Convolution Section 4 ---
    x4 = conv_block_3D(x3, 3, [128, 128, 256], stage=4, block='a',
                       trainable=True)
    x4 = identity_block_3D(x4, 3, [128, 128, 256], stage=4, block='b',
                           trainable=True)
    x4 = identity_block_3D(x4, 3, [128, 128, 256], stage=4, block='c',
                           trainable=True)

    # --- Convolution Section 5 ---
    x5 = conv_block_3D(x4, 3, [256, 256, 512], stage=5, block='a',
                       trainable=True)
    x5 = identity_block_3D(x5, 3, [256, 256, 512], stage=5, block='b',
                           trainable=True)
    x5 = identity_block_3D(x5, 3, [256, 256, 512], stage=5, block='c',
                           trainable=True)

    y = MaxPooling3D((1, 1, 1), strides=(1, 1, 1), name='mpool2')(x5)
    return inputs, y
def resnext_or(classes=2):
    """Single-path 3D ResNeXt classifier over (280, 280, 16, 1) volumes.

    classes == 1 gives a sigmoid head, otherwise softmax; `use_bias_flag`
    is a module-level global.  Returns the (uncompiled) Keras model.

    Fix vs. the original: `Model` is built with the current
    `inputs=`/`outputs=` keywords instead of the removed
    `input=`/`output=`.
    """
    inputs = Input(shape=(280, 280, 16, 1), name='input1')  # 256*256*128
    print("input shape:", inputs.shape)

    # Stem: strided 7x7x7 conv (depth stride 1) + BN + ReLU + pooling.
    out = Conv3D(64, 7, strides=(2, 2, 1), padding='same',
                 kernel_initializer='he_normal', use_bias=False,
                 name='conv1')(inputs)
    print("conv0 shape:", out.shape)  # (?, 140, 140, 16, 64)
    out = BatchNormalization(axis=-1, epsilon=1e-6, name='bn1')(out)
    out = Activation('relu')(out)
    out = MaxPooling3D((3, 3, 3), strides=(2, 2, 1), padding='same')(out)
    print("pooling1 shape:", out.shape)  # (?, 70, 70, 16, 64)

    # Stage L1.  Filter specs must be [n, n, X*n] (grouped convolution);
    # the output channel count is X*n, and n should be a multiple of 32
    # (the grouped convolution uses 32 groups).
    out = conv_block_or(out, [64, 64, 256], name='L1_block1')
    print("conv1 shape:", out.shape)
    # identity_block keeps channels unchanged; its spec's last entry must
    # equal the previous block's output channel count.
    out = identity_block(out, [64, 64, 256], name='L1_block2')
    out = identity_block(out, [64, 64, 256], name='L1_block3')

    # Stage L2
    out = conv_block_or(out, [128, 128, 512], name='L2_block1')
    print("conv2 shape:", out.shape)
    out = identity_block(out, [128, 128, 512], name='L2_block2')
    out = identity_block(out, [128, 128, 512], name='L2_block3')
    out = identity_block(out, [128, 128, 512], name='L2_block4')

    # Stage L3
    out = conv_block_or(out, [256, 256, 1024], name='L3_block1')
    print("conv3 shape:", out.shape)
    out = identity_block(out, [256, 256, 1024], name='L3_block2')
    out = identity_block(out, [256, 256, 1024], name='L3_block3')
    out = identity_block(out, [256, 256, 1024], name='L3_block4')
    out = identity_block(out, [256, 256, 1024], name='L3_block5')
    out = identity_block(out, [256, 256, 1024], name='L3_block6')

    # Stage L4
    out = conv_block_or(out, [512, 512, 2048], name='L4_block1')
    print("conv4 shape:", out.shape)
    out = identity_block(out, [512, 512, 2048], name='L4_block2')
    out = identity_block(out, [512, 512, 2048], name='L4_block3')

    # Head: global average pool + dropout + dense classifier.
    out = GlobalAveragePooling3D(data_format='channels_last')(out)
    print("Gpooling shape:", out.shape)
    out_drop = Dropout(rate=0.3)(out)
    if classes == 1:
        output = Dense(classes, activation='sigmoid',
                       use_bias=use_bias_flag, name='fc1')(out_drop)
        print("predictions1 shape:", output.shape, 'activition:sigmoid')
    else:
        output = Dense(classes, activation='softmax',
                       use_bias=use_bias_flag, name='fc1')(out_drop)
        print("predictions2 shape:", output.shape, 'activition:softmax')

    model = Model(inputs=inputs, outputs=output)
    # mean_squared_logarithmic_error or binary_crossentropy
    # model.compile(optimizer=SGD(lr=1e-6, momentum=0.9), loss=EuiLoss,
    #               metrics=[y_t, y_pre, Acc])
    return model
def build(self, X_train, y_train, dropout=0.6, dropout_recurrent=0.3,
          dropout_conv=0.4, epochs=50, optimizer='rmsprop',
          learning_rate=0.01, batch_size=1, loss='mean_squared_error',
          batch_norm=True, conv_activation='tanh',
          conv_filters=(16, 8, 4), conv_kernels=(7, 5, 3),
          conv_pooling=(2, 2, 2, 0), recurrent_activation='hard_sigmoid',
          padding='same', dense_nodes=(1024, 512, 256),
          dense_activation='relu', patience=0, validation_split=0.1,
          validation_data=None, verbose=1, invalidate=False,
          tensorboard=False, numerical=True,
          dense_kernel_regularizer=('L2', 0.02),
          conv_kernel_regularizer=('L2', 0.02),
          conv_recurrent_regularizer=('L2', 0.02),
          lr_plateau=(0.1, 5, 0.0001), cache_id=None, evaluate=None,
          version="E1"):
    """Build, compile and fit a stateless ConvLSTM2D model.

    Fix vs. the original: the sequence defaults (conv_filters,
    conv_kernels, conv_pooling, dense_nodes) are now tuples instead of
    mutable lists (shared-mutable-default pitfall); behavior is
    unchanged since they are only iterated and indexed.

    :X_train: Training features, shaped (samples, time, h, w, channels)
    :y_train: Training outcomes
    :dropout: Dropout rate applied after conv and dense stages
    :dropout_recurrent: Recurrent dropout for ConvLSTM2D (currently unused)
    :dropout_conv: Conv dropout rate (currently unused — TODO confirm)
    :epochs: Number of training epochs
    :optimizer: 'rmsprop' | 'adam' | 'sgd'
    :learning_rate: Learning rate for the chosen optimizer
    :batch_size: Training batch size
    :loss: Loss function name
    :batch_norm: Whether to insert BatchNormalization layers
    :conv_activation: Activation for the ConvLSTM2D layers
    :conv_filters: Filter counts, one per ConvLSTM2D layer
    :conv_kernels: Kernel sizes, one per ConvLSTM2D layer
    :conv_pooling: Pool sizes, one per conv layer plus a final one
                   before Flatten (0 disables)
    :recurrent_activation: Recurrent activation for ConvLSTM2D
    :padding: Padding mode for conv/pool layers
    :dense_nodes: Node counts for the dense layers
    :dense_activation: Activation for all dense layers but the last
    :patience: Early-stopping patience (0 disables)
    :validation_split: Fraction of data held out when no
                       validation_data is given
    :validation_data: Explicit validation set (overrides the split)
    :verbose: Keras verbosity level
    :invalidate: Whether the cache should be invalidated (unused here)
    :tensorboard: Whether to log to TensorBoard
    :numerical: Whether output is numerical (final layer is Dense(1))
    :dense_kernel_regularizer: ('L1'|'L2'|'L1_L2', weight) for Dense
    :conv_kernel_regularizer: ('L1'|'L2'|'L1_L2', weight) for ConvLSTM2D
                              kernels
    :conv_recurrent_regularizer: same, for recurrent kernels
    :lr_plateau: (factor, patience, min_lr) for ReduceLROnPlateau,
                 falsy to disable
    :cache_id: Experiment id used to derive cache/log paths
    :evaluate: If set, load that cached model variant and return early
    :version: Experiment version tag used in cache/log names
    """
    current_path = pathlib.Path(__file__).resolve().parent.parent.parent

    # Derive cache/log locations from the experiment id, if given.
    if cache_id is not None:
        self.cache_path = current_path / '00_CACHE'
        self.cache_name = f'lstm_{version}_{cache_id}'
        self.log_path = current_path / '03_EVALUATION/histories'
        self.log_name = f'lstm_{version}_{cache_id}'

    # Evaluation mode: load a cached model ('latest' or 'best') and stop.
    if evaluate is not None:
        model = load_model(self.cache_path /
                           (self.cache_name + f'_{evaluate}.h5'))
        self.model = model
        return

    # Input shape without the sample axis: (time, h, w, channels).
    input_shape = (X_train.shape[1], X_train.shape[2], X_train.shape[3],
                   X_train.shape[4])
    print(f'> input_shape: {input_shape!s}')

    # Resolve the (name, weight) regularizer specs into keras objects.
    regularizers = {'L2': l2, 'L1': l1, 'L1_L2': l1_l2}
    conv_regularize_params = dict()
    dense_regularize_params = dict()
    if conv_kernel_regularizer is not None:
        reg = regularizers[conv_kernel_regularizer[0]]
        conv_regularize_params['kernel_regularizer'] = reg(
            conv_kernel_regularizer[1])
    if conv_recurrent_regularizer is not None:
        reg = regularizers[conv_recurrent_regularizer[0]]
        conv_regularize_params['recurrent_regularizer'] = reg(
            conv_recurrent_regularizer[1])
    if dense_kernel_regularizer is not None:
        reg = regularizers[dense_kernel_regularizer[0]]
        dense_regularize_params['kernel_regularizer'] = reg(
            dense_kernel_regularizer[1])

    model = Sequential()

    # Convolutional LSTM stack; only the last layer drops the time axis.
    for (index, filters) in enumerate(conv_filters):
        last_layer = index == len(conv_filters) - 1
        if index == 0:
            # First layer needs the input shape.
            model.add(
                ConvLSTM2D(filters=filters,
                           kernel_size=(conv_kernels[index],
                                        conv_kernels[index]),
                           activation=conv_activation,
                           recurrent_activation=recurrent_activation,
                           padding=padding,
                           input_shape=input_shape,
                           return_sequences=not last_layer,
                           **conv_regularize_params))
        else:
            model.add(
                ConvLSTM2D(filters=filters,
                           kernel_size=(conv_kernels[index],
                                        conv_kernels[index]),
                           activation=conv_activation,
                           recurrent_activation=recurrent_activation,
                           padding=padding,
                           return_sequences=not last_layer,
                           **conv_regularize_params))
        # Optional pooling: 2D once the time axis is gone, else 3D with
        # pool size 1 on the time axis.
        if conv_pooling[index] > 0:
            if last_layer:
                model.add(
                    MaxPooling2D(pool_size=(conv_pooling[index],
                                            conv_pooling[index])))
            else:
                model.add(
                    MaxPooling3D(pool_size=(1, conv_pooling[index],
                                            conv_pooling[index])))
        if batch_norm:
            model.add(BatchNormalization())
        if dropout:
            model.add(Dropout(dropout))

    # Final pooling before flattening to reduce dimensionality.
    if conv_pooling[len(conv_filters)] > 0:
        model.add(
            MaxPooling2D(pool_size=(conv_pooling[len(conv_filters)],
                                    conv_pooling[len(conv_filters)]),
                         padding=padding))

    model.add(Flatten())
    if batch_norm:
        model.add(BatchNormalization())

    # Dense head.
    for dense in dense_nodes:
        model.add(
            Dense(dense, activation=dense_activation,
                  **dense_regularize_params))
        if batch_norm:
            model.add(BatchNormalization())
        if dropout:
            model.add(Dropout(dropout))

    # Resolve the optimizer name into a configured instance.
    if optimizer == 'rmsprop':
        optimizer = RMSprop(lr=learning_rate if learning_rate else 0.001)
    elif optimizer == 'adam':
        optimizer = Adam(lr=learning_rate if learning_rate else 0.001)
    elif optimizer == 'sgd':
        optimizer = SGD(lr=learning_rate if learning_rate else 0.01)

    # Single numerical output.
    model.add(Dense(1))
    model.compile(loss=loss,
                  optimizer=optimizer,
                  metrics=['mean_squared_error', 'mean_absolute_error'])
    print(model.summary())
    print('\n')

    # Assemble callbacks.
    callbacks = []
    if patience > 0:
        callbacks.append(
            EarlyStopping(monitor='val_loss', patience=patience))
    if tensorboard:
        callbacks.append(
            TensorBoard(log_dir=current_path /
                        (f'00_LOGS/{time.time()!s}')))
    if lr_plateau:
        callbacks.append(
            ReduceLROnPlateau(monitor='val_loss', factor=lr_plateau[0],
                              patience=lr_plateau[1],
                              min_lr=lr_plateau[2]))
    # Always checkpoint the best and the latest model.
    # NOTE(review): assumes self.cache_path exists even when cache_id is
    # None — presumably initialised in __init__; verify.
    if self.cache_path is not None:
        best_path = str(self.cache_path / f'{self.cache_name}_best.h5')
        latest_path = str(self.cache_path / f'{self.cache_name}_latest.h5')
        callbacks.append(
            ModelCheckpoint(filepath=best_path, verbose=1,
                            save_best_only=True))
        callbacks.append(ModelCheckpoint(filepath=latest_path, verbose=1))
    # Log results to CSV.
    if self.log_path is not None:
        log_path = str(self.log_path / f'{self.log_name}.csv')
        callbacks.append(CSVLogger(log_path))

    # Fit: explicit validation set if provided, else a split.
    if validation_data:
        history = model.fit(X_train, y_train, epochs=epochs,
                            validation_data=validation_data,
                            batch_size=batch_size, verbose=verbose,
                            callbacks=callbacks)
    else:
        history = model.fit(X_train, y_train, epochs=epochs,
                            validation_split=validation_split,
                            batch_size=batch_size, verbose=verbose,
                            callbacks=callbacks)
    self.model = model
    self.history = history
def multiinput_resnext(classes=2):
    """Two-input 3D ResNeXt classifier (e.g. two imaging phases).

    Each (280, 280, 16, 1) input runs through its own stem and L1 stage;
    the paths are concatenated on the channel axis and share stages
    L2-L4 plus the dense sigmoid head.  Returns the compiled model.

    Fixes vs. the original: the two path-2 debug prints reported
    `out1.shape` instead of `out2.shape` (copy-paste bug), and `Model`
    now uses the current `inputs=`/`outputs=` keywords.
    """
    # input 1: a ====================================================
    inputs_1 = Input(shape=(280, 280, 16, 1), name='path1_input1')  # 256*256*128
    print("path1_input shape:", inputs_1.shape)
    out1 = Conv3D(64, 7, strides=(2, 2, 1), padding='same',
                  kernel_initializer='he_normal', use_bias=False,
                  name='path1_conv1')(inputs_1)
    print("path1_conv0 shape:", out1.shape)  # (?, 140, 140, 16, 64)
    out1 = BatchNormalization(axis=-1, epsilon=1e-6, name='path1_bn1')(out1)
    out1 = Activation('relu')(out1)
    out1 = MaxPooling3D((3, 3, 3), strides=(2, 2, 1), padding='same')(out1)
    print("path1_pooling1 shape:", out1.shape)  # (?, 70, 70, 16, 64)
    out1 = conv_block(out1, [64, 64, 256], name='path1_L1_block1')
    print("path1_conv1 shape:", out1.shape)
    out1 = identity_block(out1, [64, 64, 256], name='path1_L1_block2')
    out1 = identity_block(out1, [64, 64, 256], name='path1_L1_block3')

    # input 2: v ====================================================
    inputs_2 = Input(shape=(280, 280, 16, 1), name='path2_input2')  # 256*256*128
    print("path2_input shape:", inputs_2.shape)
    out2 = Conv3D(64, 7, strides=(2, 2, 1), padding='same',
                  kernel_initializer='he_normal', use_bias=False,
                  name='path2_conv1')(inputs_2)
    print("path2_conv0 shape:", out2.shape)  # fixed: was out1.shape
    out2 = BatchNormalization(axis=-1, epsilon=1e-6, name='path2_bn1')(out2)
    out2 = Activation('relu')(out2)
    out2 = MaxPooling3D((3, 3, 3), strides=(2, 2, 1), padding='same')(out2)
    print("path2_pooling1 shape:", out2.shape)  # fixed: was out1.shape
    out2 = conv_block(out2, [64, 64, 256], name='path2_L1_block1')
    print("path2_conv1 shape:", out2.shape)
    out2 = identity_block(out2, [64, 64, 256], name='path2_L1_block2')
    out2 = identity_block(out2, [64, 64, 256], name='path2_L1_block3')

    # main path: concatenate 'out1' and 'out2' into 'out' ===========
    out = concatenate([out1, out2], axis=-1)
    print("concatenate shape:", out.shape)
    out = conv_block(out, [128, 128, 512], name='L2_block1')
    print("conv2 shape:", out.shape)
    out = identity_block(out, [128, 128, 512], name='L2_block2')
    out = identity_block(out, [128, 128, 512], name='L2_block3')
    out = identity_block(out, [128, 128, 512], name='L2_block4')
    out = conv_block(out, [256, 256, 1024], name='L3_block1')
    print("conv3 shape:", out.shape)
    out = identity_block(out, [256, 256, 1024], name='L3_block2')
    out = identity_block(out, [256, 256, 1024], name='L3_block3')
    out = identity_block(out, [256, 256, 1024], name='L3_block4')
    out = identity_block(out, [256, 256, 1024], name='L3_block5')
    out = identity_block(out, [256, 256, 1024], name='L3_block6')
    out = conv_block(out, [512, 512, 2048], name='L4_block1')
    print("conv4 shape:", out.shape)
    out = identity_block(out, [512, 512, 2048], name='L4_block2')
    out = identity_block(out, [512, 512, 2048], name='L4_block3')

    # Head: global average pool + dropout + dense sigmoid.
    out = GlobalAveragePooling3D(data_format='channels_last')(out)
    print("Gpooling shape:", out.shape)
    out_drop = Dropout(rate=0.3)(out)
    out = Dense(classes, name='fc1')(out_drop)
    print("out shape:", out.shape)
    output = Activation(activation='sigmoid')(out)

    model = Model(inputs=[inputs_1, inputs_2], outputs=output)
    # mean_squared_logarithmic_error or binary_crossentropy
    model.compile(optimizer=SGD(lr=1e-6, momentum=0.9), loss=EuiLoss,
                  metrics=[y_t, y_pre, Acc])
    print('im multi_input_ClassNet')
    return model
# # # Define CHAR Encoder # # #
#############################
# Input data: (batch * nb_characters * pixel_width * pixel_height)
# e.g. shape (56443 * 62 * 64 * 64)
char_encoder_input = Input(shape=(CHARS_IN_FONT, 64, 64, 1),
                           name='Char_Encoder_Input')
x = char_encoder_input

# Convolutional stack: depth-1 kernels keep the characters independent
# along the first axis, so each character is convolved/pooled in 2D.
i = 1
for f in CHAR_CONV_DIMS[1:-1]:
    x = Conv3D(f, (1, 3, 3), activation='relu', padding='same',
               name='E_C_Cov_' + str(i))(x)
    x = MaxPooling3D((1, 2, 2), padding='same',
                     name='E_C_Pool_' + str(i))(x)
    i += 1

# Beta-VAE head: parallel mean and log-variance branches off the same
# feature map, then a reparameterisation sample.
z_c_mean = Conv3D(CHAR_ENCODING_DIM, (1, 3, 3), padding='same',
                  kernel_regularizer=l2(0.01),
                  name='E_C_Enc_Mean_Conv')(x)
z_c_mean = MaxPooling3D((1, 2, 2), padding='same',
                        name='E_C_Enc_Mean_Pool')(z_c_mean)
z_c_log_var = Conv3D(CHAR_ENCODING_DIM, (1, 3, 3), padding='same',
                     kernel_regularizer=l2(0.01),
                     name='E_C_Enc_Var_Conv')(x)
z_c_log_var = MaxPooling3D((1, 2, 2), padding='same',
                           name='E_C_Enc_Var_Pool')(z_c_log_var)
z_c_sample = Lambda(sampling,
                    output_shape=(CHARS_IN_FONT, 1, 1, CHAR_ENCODING_DIM,),
                    name='E_C_Enc_Sample')([z_c_mean, z_c_log_var])

# KL-divergence regularisation layer (adds the VAE loss term).
kl_params = {'vae_beta': VAE_BETA_CHAR}
z_c_mean, z_c_log_var = KLDivergenceLayer(
    name='Char_VAE', **kl_params)([z_c_mean, z_c_log_var])
# Fuse the two streams with a 3D convolution over (time, height, width).
Conv3D_2_stream = Conv3D(filters=n_filters,
                         kernel_size=(3, 3, 3),
                         strides=(1, 1, 1),
                         padding='same',
                         data_format="channels_last",
                         activation="relu",
                         use_bias=True)(concat_2_stream)
print("Conv3D output tensor", Conv3D_2_stream)

# Collapse the temporal axis entirely (pool depth = number of snippets),
# halving the spatial dimensions at the same time.
MaxPool3D_2_stream = MaxPooling3D(pool_size=(num_of_snip, 2, 2),
                                  strides=(2, 2, 2),
                                  padding="valid")(Conv3D_2_stream)
print("MaxPool3D output tensor", MaxPool3D_2_stream)

# Flatten the pooled features and project into the first FC layer.
x = Flatten()(MaxPool3D_2_stream)
print("Flattened tensor", x.shape)
x = Dense(4096)(x)
def Unet(input_shape, n_labels, activation='sigmoid'):
    """3D U-Net with dense (input-reuse) connections inside each stage.

    Every stage runs two 3x3x3 ReLU convolutions and concatenates the
    stage input back onto the features after each one (channel axis 4).
    Returns an uncompiled Keras Model.
    """

    def _stage(x_in, filters):
        # Two convs; re-concatenate the stage input after each.
        # Returns (second conv output, second concatenation).
        a = Conv3D(filters, (3, 3, 3), activation='relu',
                   padding='same')(x_in)
        cat_a = concatenate([x_in, a], axis=4)
        b = Conv3D(filters, (3, 3, 3), activation='relu',
                   padding='same')(cat_a)
        return b, concatenate([x_in, b], axis=4)

    inputs = Input(input_shape)

    # Encoder
    conv12, conc12 = _stage(inputs, 32)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conc12)
    conv22, conc22 = _stage(pool1, 64)
    pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conc22)
    conv32, conc32 = _stage(pool2, 128)
    pool3 = MaxPooling3D(pool_size=(2, 2, 2))(conc32)
    conv42, conc42 = _stage(pool3, 256)
    pool4 = MaxPooling3D(pool_size=(2, 2, 2))(conc42)

    # Bottleneck
    conv52, conc52 = _stage(pool4, 512)

    # Decoder
    # NOTE(review): the first skip uses the concatenated tensor (conc42)
    # while the later skips use plain conv outputs (conv32/conv22/conv12).
    # Preserved exactly as in the original — confirm the asymmetry is
    # intentional.
    up6 = concatenate([
        Conv3DTranspose(256, (2, 2, 2), strides=(2, 2, 2),
                        padding='same')(conc52), conc42
    ], axis=4)
    _, conc62 = _stage(up6, 256)
    up7 = concatenate([
        Conv3DTranspose(128, (2, 2, 2), strides=(2, 2, 2),
                        padding='same')(conc62), conv32
    ], axis=4)
    _, conc72 = _stage(up7, 128)
    up8 = concatenate([
        Conv3DTranspose(64, (2, 2, 2), strides=(2, 2, 2),
                        padding='same')(conc72), conv22
    ], axis=4)
    _, conc82 = _stage(up8, 64)
    up9 = concatenate([
        Conv3DTranspose(32, (2, 2, 2), strides=(2, 2, 2),
                        padding='same')(conc82), conv12
    ], axis=4)
    _, conc92 = _stage(up9, 32)

    conv10 = Conv3D(n_labels, (1, 1, 1), activation=activation)(conc92)
    return Model(inputs=[inputs], outputs=[conv10])
K.set_image_dim_ordering('th')  # channels-first ('th') tensor layout
############################################################################################################
#############################                CNN RESNET 50                  ####################################
############################################################################################################
input1 = Input(shape=(1, 29, 112, 112))

# Stem: pad, 5x7x7 conv with spatial stride 2, BN on the channel axis
# (axis=1 under channels-first), ReLU.
pad1 = ZeroPadding3D((1, 3, 3))(input1)
conv1 = Conv3D(64, (5, 7, 7), name="conv1", strides=(1, 2, 2),
               padding="valid")(pad1)
B1 = BatchNormalization(axis=1)(conv1)
act1 = Activation('relu')(B1)

# Spatial max-pool; the temporal axis is left untouched.
padm1 = ZeroPadding3D((0, 1, 1))(act1)
m1 = MaxPooling3D((1, 3, 3), strides=(1, 2, 2))(padm1)

# Move time ahead of channels, then flatten each frame to a vector of
# 64 * 28 * 28 features over 27 timesteps.
perm1 = Permute(dims=(2, 1, 3, 4))(m1)
Flat1 = Reshape((27, 64 * 28 * 28))(perm1)

# Per-timestep dense stack: 384 -> 384 -> 256, each with BN + ReLU.
lin1 = TimeDistributed(Dense(384))(Flat1)
B_lin1 = BatchNormalization(axis=-1)(lin1)
act_lin1 = Activation('relu')(B_lin1)
lin2 = TimeDistributed(Dense(384))(act_lin1)
B_lin2 = BatchNormalization(axis=-1)(lin2)
act_lin2 = Activation('relu')(B_lin2)
lin3 = TimeDistributed(Dense(256))(act_lin2)
B_lin3 = BatchNormalization(axis=-1)(lin3)
act_lin3 = Activation('relu')(B_lin3)